#define GET_SUBTARGETINFO_CTOR
#include "RISCVGenSubtargetInfo.inc"
+static cl::opt<bool> EnableSubRegLiveness("riscv-enable-subreg-liveness",
+ cl::init(false), cl::Hidden);
+
static cl::opt<int> RVVVectorBitsMax(
"riscv-v-vector-bits-max",
cl::desc("Assume V extension vector registers are at most this big, "
// Returns true when fixed-length vector types should be lowered onto the
// scalable RVV register file. Requires both that the V extension is present
// and that a nonzero minimum vector size was specified (via
// -riscv-v-vector-bits-min), since fixed-length lowering needs a known
// lower bound on VLEN.
// NOTE: the operand order matters — hasVInstructions() must be checked
// first; getMinRVVVectorSizeInBits() is only valid to call when the V
// extension is enabled.
bool RISCVSubtarget::useRVVForFixedLengthVectors() const {
  return hasVInstructions() && getMinRVVVectorSizeInBits() != 0;
}
+
+bool RISCVSubtarget::enableSubRegLiveness() const {
+  // An explicit -riscv-enable-subreg-liveness on the command line always
+  // wins over the target default below.
+  if (EnableSubRegLiveness.getNumOccurrences())
+    return EnableSubRegLiveness;
+  // Enable subregister liveness for RVV to better handle LMUL>1 and segment
+  // load/store.
+  return hasVInstructions();
+}
unsigned getMinRVVVectorSizeInBits() const;
unsigned getMaxLMULForFixedLengthVectors() const;
bool useRVVForFixedLengthVectors() const;
+
+ bool enableSubRegLiveness() const override;
};
} // End llvm namespace
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+f,+m,+zfh,+experimental-zvfh < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+f,+m,+zfh,+experimental-zvfh \
+; RUN: -riscv-enable-subreg-liveness=false < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+f,+m,+zfh,+experimental-zvfh < %s \
+; RUN: | FileCheck %s --check-prefix=SUBREGLIVENESS
; This testcase failed to compile after
; c46aab01c002b7a04135b8b7f1f52d8c9ae23a58, which was reverted.
; CHECK-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 32
; CHECK-NEXT: ret
+;
+; SUBREGLIVENESS-LABEL: last_chance_recoloring_failure:
+; SUBREGLIVENESS: # %bb.0: # %entry
+; SUBREGLIVENESS-NEXT: addi sp, sp, -32
+; SUBREGLIVENESS-NEXT: .cfi_def_cfa_offset 32
+; SUBREGLIVENESS-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
+; SUBREGLIVENESS-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
+; SUBREGLIVENESS-NEXT: .cfi_offset ra, -8
+; SUBREGLIVENESS-NEXT: .cfi_offset s0, -16
+; SUBREGLIVENESS-NEXT: csrr a0, vlenb
+; SUBREGLIVENESS-NEXT: slli a0, a0, 4
+; SUBREGLIVENESS-NEXT: sub sp, sp, a0
+; SUBREGLIVENESS-NEXT: li a0, 55
+; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; SUBREGLIVENESS-NEXT: vloxseg2ei32.v v8, (a0), v8
+; SUBREGLIVENESS-NEXT: csrr a0, vlenb
+; SUBREGLIVENESS-NEXT: slli a0, a0, 3
+; SUBREGLIVENESS-NEXT: add a0, sp, a0
+; SUBREGLIVENESS-NEXT: addi a0, a0, 16
+; SUBREGLIVENESS-NEXT: csrr a1, vlenb
+; SUBREGLIVENESS-NEXT: slli a1, a1, 2
+; SUBREGLIVENESS-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT: add a0, a0, a1
+; SUBREGLIVENESS-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT: vsetvli a0, zero, e8, m2, ta, mu
+; SUBREGLIVENESS-NEXT: vmclr.m v0
+; SUBREGLIVENESS-NEXT: li s0, 36
+; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, tu, mu
+; SUBREGLIVENESS-NEXT: vfwadd.vv v8, v8, v8, v0.t
+; SUBREGLIVENESS-NEXT: addi a0, sp, 16
+; SUBREGLIVENESS-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; SUBREGLIVENESS-NEXT: call func@plt
+; SUBREGLIVENESS-NEXT: li a0, 32
+; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; SUBREGLIVENESS-NEXT: vrgather.vv v16, v8, v8, v0.t
+; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, mu
+; SUBREGLIVENESS-NEXT: csrr a1, vlenb
+; SUBREGLIVENESS-NEXT: slli a1, a1, 3
+; SUBREGLIVENESS-NEXT: add a1, sp, a1
+; SUBREGLIVENESS-NEXT: addi a1, a1, 16
+; SUBREGLIVENESS-NEXT: csrr a2, vlenb
+; SUBREGLIVENESS-NEXT: slli a2, a2, 2
+; SUBREGLIVENESS-NEXT: vl4r.v v20, (a1) # Unknown-size Folded Reload
+; SUBREGLIVENESS-NEXT: add a1, a1, a2
+; SUBREGLIVENESS-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload
+; SUBREGLIVENESS-NEXT: addi a1, sp, 16
+; SUBREGLIVENESS-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload
+; SUBREGLIVENESS-NEXT: vfwsub.wv v8, v24, v20
+; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; SUBREGLIVENESS-NEXT: vssubu.vv v16, v16, v8, v0.t
+; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e32, m8, tu, mu
+; SUBREGLIVENESS-NEXT: vfdiv.vv v8, v24, v8, v0.t
+; SUBREGLIVENESS-NEXT: vse32.v v8, (a0)
+; SUBREGLIVENESS-NEXT: csrr a0, vlenb
+; SUBREGLIVENESS-NEXT: slli a0, a0, 4
+; SUBREGLIVENESS-NEXT: add sp, sp, a0
+; SUBREGLIVENESS-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
+; SUBREGLIVENESS-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
+; SUBREGLIVENESS-NEXT: addi sp, sp, 32
+; SUBREGLIVENESS-NEXT: ret
entry:
%i = call { <vscale x 16 x half>, <vscale x 16 x half> } @llvm.riscv.vloxseg2.nxv16f16.nxv16i32.i64(half* nonnull poison, <vscale x 16 x i32> poison, i64 55)
%i1 = extractvalue { <vscale x 16 x half>, <vscale x 16 x half> } %i, 0
define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m4
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
ret <vscale x 4 x i32> %c
define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m4
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
ret <vscale x 2 x i32> %c
define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv8i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m8
; CHECK-NEXT: ret
%c = call <vscale x 8 x i32> @llvm.experimental.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
ret <vscale x 8 x i32> %c
define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m8
; CHECK-NEXT: ret
%c = call <vscale x 4 x i32> @llvm.experimental.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
ret <vscale x 4 x i32> %c
define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m8
; CHECK-NEXT: ret
%c = call <vscale x 2 x i32> @llvm.experimental.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
ret <vscale x 2 x i32> %c
define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m8
; CHECK-NEXT: ret
%c = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
ret <vscale x 1 x i32> %c
define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_0(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m4
; CHECK-NEXT: ret
%c = call <vscale x 2 x i8> @llvm.experimental.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 0)
ret <vscale x 2 x i8> %c
define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_0(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8m4
; CHECK-NEXT: ret
%c = call <vscale x 2 x half> @llvm.experimental.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 0)
ret <vscale x 2 x half> %c
define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_0(<vscale x 12 x half> %in) {
; CHECK-LABEL: extract_nxv6f16_nxv12f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m4
; CHECK-NEXT: ret
%res = call <vscale x 6 x half> @llvm.experimental.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 0)
ret <vscale x 6 x half> %res
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT: vslidedown.vx v14, v10, a0
-; CHECK-NEXT: vslidedown.vx v12, v9, a0
+; CHECK-NEXT: vslidedown.vx v11, v10, a0
+; CHECK-NEXT: vslidedown.vx v8, v9, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vslideup.vi v13, v14, 0
+; CHECK-NEXT: vslideup.vi v9, v11, 0
; CHECK-NEXT: add a1, a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vslideup.vx v12, v10, a0
-; CHECK-NEXT: vmv2r.v v8, v12
+; CHECK-NEXT: vslideup.vx v8, v10, a0
; CHECK-NEXT: ret
%res = call <vscale x 6 x half> @llvm.experimental.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 6)
ret <vscale x 6 x half> %res
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
%1 = bitcast <16 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0i8.i64(i8* %1, i64 8)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlseg3e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v6_v7_v8
; CHECK-NEXT: ret
%1 = bitcast <24 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0i8.i64(i8* %1, i64 8)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlseg4e8.v v5, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = bitcast <32 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0i8.i64(i8* %1, i64 8)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlseg5e8.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v4_v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = bitcast <40 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0i8.i64(i8* %1, i64 8)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlseg6e8.v v3, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v3_v4_v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = bitcast <48 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0i8.i64(i8* %1, i64 8)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlseg7e8.v v2, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v2_v3_v4_v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = bitcast <56 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0i8.i64(i8* %1, i64 8)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlseg8e8.v v1, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v1_v2_v3_v4_v5_v6_v7_v8
; CHECK-NEXT: ret
%1 = bitcast <64 x i8>* %ptr to i8*
%2 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0i8.i64(i8* %1, i64 8)
; RV32-V128-LABEL: interleave_v2f64:
; RV32-V128: # %bb.0:
; RV32-V128-NEXT: vmv1r.v v12, v9
-; RV32-V128-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV32-V128-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
-; RV32-V128-NEXT: vid.v v10
-; RV32-V128-NEXT: vsrl.vi v14, v10, 1
+; RV32-V128-NEXT: vid.v v9
+; RV32-V128-NEXT: vsrl.vi v9, v9, 1
; RV32-V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; RV32-V128-NEXT: vrgatherei16.vv v10, v8, v14
+; RV32-V128-NEXT: vrgatherei16.vv v10, v8, v9
; RV32-V128-NEXT: li a0, 10
; RV32-V128-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV32-V128-NEXT: vmv.s.x v0, a0
; RV32-V128-NEXT: vsetivli zero, 4, e64, m2, ta, mu
-; RV32-V128-NEXT: vrgatherei16.vv v10, v12, v14, v0.t
+; RV32-V128-NEXT: vrgatherei16.vv v10, v12, v9, v0.t
; RV32-V128-NEXT: vmv.v.v v8, v10
; RV32-V128-NEXT: ret
;
; RV64-V128-LABEL: interleave_v2f64:
; RV64-V128: # %bb.0:
; RV64-V128-NEXT: vmv1r.v v12, v9
-; RV64-V128-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV64-V128-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-V128-NEXT: vid.v v10
; RV64-V128-NEXT: vsrl.vi v14, v10, 1
; RV32-V128-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; RV32-V128-NEXT: vle32.v v0, (a0)
; RV32-V128-NEXT: vmv8r.v v24, v8
-; RV32-V128-NEXT: addi a0, sp, 16
-; RV32-V128-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-V128-NEXT: vrgather.vv v8, v24, v0
+; RV32-V128-NEXT: addi a0, sp, 16
+; RV32-V128-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV32-V128-NEXT: lui a0, %hi(.LCPI10_1)
; RV32-V128-NEXT: addi a0, a0, %lo(.LCPI10_1)
; RV32-V128-NEXT: vle32.v v24, (a0)
; RV64-V128-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; RV64-V128-NEXT: vle32.v v0, (a0)
; RV64-V128-NEXT: vmv8r.v v24, v8
-; RV64-V128-NEXT: addi a0, sp, 16
-; RV64-V128-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-V128-NEXT: vrgather.vv v8, v24, v0
+; RV64-V128-NEXT: addi a0, sp, 16
+; RV64-V128-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV64-V128-NEXT: lui a0, %hi(.LCPI10_1)
; RV64-V128-NEXT: addi a0, a0, %lo(.LCPI10_1)
; RV64-V128-NEXT: vle32.v v24, (a0)
; LMULMAX2-NEXT: vsetivli zero, 16, e8, m2, ta, mu
; LMULMAX2-NEXT: vslidedown.vi v10, v8, 16
; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, mu
-; LMULMAX2-NEXT: vslidedown.vi v14, v10, 8
+; LMULMAX2-NEXT: vslidedown.vi v9, v10, 8
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
-; LMULMAX2-NEXT: vsext.vf4 v16, v14
-; LMULMAX2-NEXT: vsext.vf4 v14, v8
+; LMULMAX2-NEXT: vsext.vf4 v14, v9
+; LMULMAX2-NEXT: vsext.vf4 v16, v8
; LMULMAX2-NEXT: vsext.vf4 v8, v10
; LMULMAX2-NEXT: addi a0, a1, 64
; LMULMAX2-NEXT: vse32.v v8, (a0)
-; LMULMAX2-NEXT: vse32.v v14, (a1)
+; LMULMAX2-NEXT: vse32.v v16, (a1)
; LMULMAX2-NEXT: addi a0, a1, 96
-; LMULMAX2-NEXT: vse32.v v16, (a0)
+; LMULMAX2-NEXT: vse32.v v14, (a0)
; LMULMAX2-NEXT: addi a0, a1, 32
; LMULMAX2-NEXT: vse32.v v12, (a0)
; LMULMAX2-NEXT: ret
; RV32-V128-LABEL: interleave_v2i64:
; RV32-V128: # %bb.0:
; RV32-V128-NEXT: vmv1r.v v12, v9
-; RV32-V128-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV32-V128-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
-; RV32-V128-NEXT: vid.v v10
-; RV32-V128-NEXT: vsrl.vi v14, v10, 1
+; RV32-V128-NEXT: vid.v v9
+; RV32-V128-NEXT: vsrl.vi v9, v9, 1
; RV32-V128-NEXT: vsetvli zero, zero, e64, m2, ta, mu
-; RV32-V128-NEXT: vrgatherei16.vv v10, v8, v14
+; RV32-V128-NEXT: vrgatherei16.vv v10, v8, v9
; RV32-V128-NEXT: li a0, 10
; RV32-V128-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV32-V128-NEXT: vmv.s.x v0, a0
; RV32-V128-NEXT: vsetivli zero, 4, e64, m2, ta, mu
-; RV32-V128-NEXT: vrgatherei16.vv v10, v12, v14, v0.t
+; RV32-V128-NEXT: vrgatherei16.vv v10, v12, v9, v0.t
; RV32-V128-NEXT: vmv.v.v v8, v10
; RV32-V128-NEXT: ret
;
; RV64-V128-LABEL: interleave_v2i64:
; RV64-V128: # %bb.0:
; RV64-V128-NEXT: vmv1r.v v12, v9
-; RV64-V128-NEXT: # kill: def $v8 killed $v8 def $v8m2
; RV64-V128-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-V128-NEXT: vid.v v10
; RV64-V128-NEXT: vsrl.vi v14, v10, 1
; RV32-V128-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; RV32-V128-NEXT: vle32.v v0, (a0)
; RV32-V128-NEXT: vmv8r.v v24, v8
-; RV32-V128-NEXT: addi a0, sp, 16
-; RV32-V128-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-V128-NEXT: vrgather.vv v8, v24, v0
+; RV32-V128-NEXT: addi a0, sp, 16
+; RV32-V128-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV32-V128-NEXT: lui a0, %hi(.LCPI15_1)
; RV32-V128-NEXT: addi a0, a0, %lo(.LCPI15_1)
; RV32-V128-NEXT: vle32.v v24, (a0)
; RV64-V128-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; RV64-V128-NEXT: vle32.v v0, (a0)
; RV64-V128-NEXT: vmv8r.v v24, v8
-; RV64-V128-NEXT: addi a0, sp, 16
-; RV64-V128-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-V128-NEXT: vrgather.vv v8, v24, v0
+; RV64-V128-NEXT: addi a0, sp, 16
+; RV64-V128-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; RV64-V128-NEXT: lui a0, %hi(.LCPI15_1)
; RV64-V128-NEXT: addi a0, a0, %lo(.LCPI15_1)
; RV64-V128-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vslidedown.vx v8, v16, a0
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.s.f v24, fa0
+; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vfwredosum.vs v16, v16, v24
+; CHECK-NEXT: vfwredosum.vs v12, v16, v12
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.f.s ft0, v16
+; CHECK-NEXT: vfmv.f.s ft0, v12
; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, mu
-; CHECK-NEXT: vfmv.s.f v16, ft0
+; CHECK-NEXT: vfmv.s.f v12, ft0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vfwredosum.vs v8, v8, v16
+; CHECK-NEXT: vfwredosum.vs v8, v8, v12
; CHECK-NEXT: vsetivli zero, 0, e32, m1, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu
; CHECK-NEXT: vslidedown.vi v8, v16, 16
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.s.f v24, fa0
+; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
-; CHECK-NEXT: vfwredosum.vs v16, v16, v24
+; CHECK-NEXT: vfwredosum.vs v12, v16, v12
; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.f.s ft0, v16
+; CHECK-NEXT: vfmv.f.s ft0, v12
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
-; CHECK-NEXT: vfmv.s.f v16, ft0
+; CHECK-NEXT: vfmv.s.f v12, ft0
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
-; CHECK-NEXT: vfwredosum.vs v8, v8, v16
+; CHECK-NEXT: vfwredosum.vs v8, v8, v12
; CHECK-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
; RV32-NEXT: li a2, 32
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vle32.v v16, (a1)
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu
; RV32-NEXT: vslidedown.vi v24, v8, 16
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV64-NEXT: li a2, 32
; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, mu
; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT: vle32.v v16, (a1)
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, mu
; RV64-NEXT: vslidedown.vi v24, v8, 16
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 24
; RV64-NEXT: mul a0, a0, a1
; RV32-NEXT: li a2, 32
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
; RV32-NEXT: vle32.v v8, (a0)
-; RV32-NEXT: addi a0, sp, 16
-; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vle32.v v16, (a1)
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, mu
; RV32-NEXT: vslidedown.vi v24, v8, 16
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV64-NEXT: li a2, 32
; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, mu
; RV64-NEXT: vle32.v v8, (a0)
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT: vle32.v v16, (a1)
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, mu
; RV64-NEXT: vslidedown.vi v24, v8, 16
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 24
; RV64-NEXT: mul a0, a0, a1
define <vscale x 32 x half> @insert_nxv32f16_undef_nxv1f16_0(<vscale x 1 x half> %subvec) {
; CHECK-LABEL: insert_nxv32f16_undef_nxv1f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8m8
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 0)
ret <vscale x 32 x half> %v
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vslideup.vx v22, v8, a0
-; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: vslideup.vx v14, v8, a0
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.experimental.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 26)
ret <vscale x 32 x half> %v
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 2
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 3
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: li a1, 6
; SPILL-O2-NEXT: mul a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 2
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 3
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: li a1, 6
; SPILL-O2-NEXT: mul a0, a0, a1
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
define <vscale x 16 x i8> @test_vloxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
define <vscale x 16 x i8> @test_vloxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei16.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i16> @test_vloxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 8 x i16> @test_vloxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i8> @test_vloxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i8> @test_vloxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei16.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 8 x i8> @test_vloxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vloxseg6ei32.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i8> @test_vloxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 8 x half> @test_vloxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x half> @test_vloxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 4 x i32> @test_vloxseg4_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
define <vscale x 16 x i8> @test_vloxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
define <vscale x 16 x i8> @test_vloxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei16.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vloxseg3_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vloxseg3_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vloxseg3_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vloxseg3_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i16> @test_vloxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i8> @test_vloxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i8> @test_vloxseg6_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x i32> @test_vloxseg4_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i8> @test_vloxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i8> @test_vloxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei16.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i8> @test_vloxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vloxseg6ei32.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i16> @test_vloxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i16> @test_vloxseg6_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x i8> @test_vloxseg4_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x i16> @test_vloxseg4_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vloxseg3_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vloxseg3_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vloxseg3_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vloxseg3_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x float> @test_vloxseg4_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x half> @test_vloxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x half> @test_vloxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x half> @test_vloxseg6_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vloxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vloxseg6ei64.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vloxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x half> @test_vloxseg4_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vloxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vloxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg3ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vloxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 4 x float> @test_vloxseg4_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vloxseg4ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vloxseg4ei64.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vloxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i32 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg2e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i32 %vl)
; CHECK-NEXT: vlseg2e8.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg3e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg4e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg2e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg3e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg4e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i32 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vlseg2e32.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i32 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vlseg2e8.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i32 %vl)
; CHECK-NEXT: vlseg2e8.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i32 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg2e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg3e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg4e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(half* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vlseg2e64.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(double* %base, i32 %vl)
; CHECK-NEXT: vlseg2e64.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(double* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg2e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(double* %base, i32 %vl)
; CHECK-NEXT: vlseg2e64.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(double* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg3e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(double* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(double* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg4e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(double* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(double* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg5e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(double* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(double* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg6e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(double* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(double* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg7e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(double* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(double* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg8e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(double* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(double* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(float* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(half* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(float* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg2e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(half* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg3e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg4e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vlseg2e32.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(float* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg2e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(double* %base, i32 %vl)
; CHECK-NEXT: vlseg2e64.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(double* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg3e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(double* %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(double* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg4e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* %base, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg2e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg3e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg4e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* %base, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* %base, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg2e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg3e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg4e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg2e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i64 %vl)
; CHECK-NEXT: vlseg2e8.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e8.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg3e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e8.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg4e8.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e8.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg2e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg2.nxv1i64(i64* %base, i64 %vl)
; CHECK-NEXT: vlseg2e64.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg2.nxv1i64(i64* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg3e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg3.nxv1i64(i64* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg3.nxv1i64(i64* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg4e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg4.nxv1i64(i64* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg4.nxv1i64(i64* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg5e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg5.nxv1i64(i64* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg5.nxv1i64(i64* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg6e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg6.nxv1i64(i64* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg6.nxv1i64(i64* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg7e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg7.nxv1i64(i64* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg7.nxv1i64(i64* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg8e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg8.nxv1i64(i64* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg8.nxv1i64(i64* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg2e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg3e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg4e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i64 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i64 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vlseg2e64.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlseg2.nxv4i64(i64* %base, i64 %vl)
; CHECK-NEXT: vlseg2e64.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlseg2.nxv4i64(i64* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i64 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg2e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i64 %vl)
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg3e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg4e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg5e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg6e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg7e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg8e8.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vlseg2e32.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(i32* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vlseg2e8.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i64 %vl)
; CHECK-NEXT: vlseg2e8.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e8.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(i8* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(i16* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg2e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg2.nxv2i64(i64* %base, i64 %vl)
; CHECK-NEXT: vlseg2e64.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg2.nxv2i64(i64* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg3e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg3.nxv2i64(i64* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg3.nxv2i64(i64* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg4e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg4.nxv2i64(i64* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg4.nxv2i64(i64* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(half* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vlseg2e64.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(double* %base, i64 %vl)
; CHECK-NEXT: vlseg2e64.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e64.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(double* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg2e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(double* %base, i64 %vl)
; CHECK-NEXT: vlseg2e64.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(double* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg3e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(double* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(double* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg4e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(double* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(double* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg5e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(double* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(double* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg6e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(double* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(double* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg7e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(double* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(double* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg8e64.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(double* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e64.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(double* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(float* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(half* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg2e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(float* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg3e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg4e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg5e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg6e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg7e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg8e32.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg2e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(half* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg3e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg4e16.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e16.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vlseg2e32.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(float* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e32.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg2e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(double* %base, i64 %vl)
; CHECK-NEXT: vlseg2e64.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(double* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg3e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(double* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(double* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg4e64.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e64.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(double* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg2e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16.v v7, (a0)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg3e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg4e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16.v v7, (a0)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* %base, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(half* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg2e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32.v v6, (a0)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlseg2e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg3e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlseg3e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(float* %base, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg4e32.v v6, (a0)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* %base, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlseg4e32.v v6, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(float* %base, i64 %vl)
define void @test_vlseg2ff_mask_dead_value(<vscale x 16 x i16> %val, i16* %base, i32 %vl, <vscale x 16 x i1> %mask, i32* %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_value:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
define void @test_vlseg2ff_mask_dead_all(<vscale x 16 x i16> %val, i16* %base, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.nxv1i8(i8* %base, i32 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg3ff.nxv1i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg4ff.nxv1i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg5ff.nxv1i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg6ff.nxv1i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg7ff.nxv1i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg8ff.nxv1i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e8ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i32 %vl)
; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e8ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.nxv16i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e8ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.nxv16i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg3ff.nxv2i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg4ff.nxv2i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg5ff.nxv2i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg6ff.nxv2i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg7ff.nxv2i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg8ff.nxv2i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg3ff.nxv4i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg4ff.nxv4i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg5ff.nxv4i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg6ff.nxv4i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg7ff.nxv4i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg8ff.nxv4i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.nxv1i32(i32* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg3ff.nxv1i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg4ff.nxv1i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg5ff.nxv1i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg6ff.nxv1i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg7ff.nxv1i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg8ff.nxv1i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg3ff.nxv8i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg4ff.nxv8i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i32 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg3ff.nxv8i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg4ff.nxv8i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg5ff.nxv8i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg6ff.nxv8i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg7ff.nxv8i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg8ff.nxv8i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.nxv8i32(i32* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.nxv4i8(i8* %base, i32 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg3ff.nxv4i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg4ff.nxv4i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg5ff.nxv4i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg6ff.nxv4i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg7ff.nxv4i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg8ff.nxv4i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.nxv1i16(i16* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg3ff.nxv1i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg4ff.nxv1i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg5ff.nxv1i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg6ff.nxv1i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg7ff.nxv1i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg8ff.nxv1i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e8ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i32 %vl)
; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.nxv2i8(i8* %base, i32 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg3ff.nxv2i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg4ff.nxv2i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg5ff.nxv2i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg6ff.nxv2i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg7ff.nxv2i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg8ff.nxv2i8(i8* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.nxv2i16(i16* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg3ff.nxv2i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.nxv2i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.nxv2i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.nxv2i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.nxv2i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.nxv2i16(i16* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.nxv4i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg4ff.nxv4i32(i32* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i32} @llvm.riscv.vlseg2ff.nxv16f16(half* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e64ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i32} @llvm.riscv.vlseg2ff.nxv4f64(double* %base, i32 %vl)
; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg2ff.nxv1f64(double* %base, i32 %vl)
; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg3ff.nxv1f64(double* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg4ff.nxv1f64(double* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg5ff.nxv1f64(double* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg6ff.nxv1f64(double* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg7ff.nxv1f64(double* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg8ff.nxv1f64(double* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg2ff.nxv2f32(float* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg3ff.nxv2f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg4ff.nxv2f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg5ff.nxv2f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg6ff.nxv2f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg7ff.nxv2f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg8ff.nxv2f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg2ff.nxv1f16(half* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg3ff.nxv1f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg4ff.nxv1f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg5ff.nxv1f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg6ff.nxv1f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg7ff.nxv1f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg8ff.nxv1f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg2ff.nxv1f32(float* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg3ff.nxv1f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg4ff.nxv1f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg5ff.nxv1f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg6ff.nxv1f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg7ff.nxv1f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg8ff.nxv1f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg2ff.nxv8f16(half* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg3ff.nxv8f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg4ff.nxv8f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i32} @llvm.riscv.vlseg2ff.nxv8f32(float* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg2ff.nxv2f64(double* %base, i32 %vl)
; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg3ff.nxv2f64(double* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg4ff.nxv2f64(double* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg2ff.nxv4f16(half* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg3ff.nxv4f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg4ff.nxv4f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg5ff.nxv4f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg6ff.nxv4f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg7ff.nxv4f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg8ff.nxv4f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg2ff.nxv2f16(half* %base, i32 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg3ff.nxv2f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg4ff.nxv2f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg5ff.nxv2f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg6ff.nxv2f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg7ff.nxv2f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg8ff.nxv2f16(half* %base, i32 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg2ff.nxv4f32(float* %base, i32 %vl)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg3e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg3ff.nxv4f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vlseg4e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.nxv4f32(float* %base, i32 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sw a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define void @test_vlseg2ff_mask_dead_value(<vscale x 16 x i16> %val, i16* %base, i64 %vl, <vscale x 16 x i1> %mask, i64* %outvl) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_value:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
define void @test_vlseg2ff_mask_dead_all(<vscale x 16 x i16> %val, i16* %base, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vlseg2e16ff.v v8, (a0), v0.t
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.nxv4i32(i32* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg3ff.nxv4i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg4ff.nxv4i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg4ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e8ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.nxv16i8(i8* %base, i64 %vl)
; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e8ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg3ff.nxv16i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e8ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg4ff.nxv16i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.nxv1i64(i64* %base, i64 %vl)
; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg3ff.nxv1i64(i64* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg4ff.nxv1i64(i64* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg5ff.nxv1i64(i64* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg6ff.nxv1i64(i64* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg7ff.nxv1i64(i64* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg8ff.nxv1i64(i64* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg2ff.nxv1i32(i32* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg3ff.nxv1i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg4ff.nxv1i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg5ff.nxv1i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg6ff.nxv1i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg6ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg7ff.nxv1i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg8ff.nxv1i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg8ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.nxv8i16(i16* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg3ff.nxv8i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg4ff.nxv8i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg2ff.nxv4i8(i8* %base, i64 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg3ff.nxv4i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg4ff.nxv4i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg5ff.nxv4i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg6ff.nxv4i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg7ff.nxv4i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg8ff.nxv4i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg2ff.nxv1i16(i16* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg3ff.nxv1i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg4ff.nxv1i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg5ff.nxv1i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg6ff.nxv1i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg7ff.nxv1i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg8ff.nxv1i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg8ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.nxv2i32(i32* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg3ff.nxv2i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg4ff.nxv2i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg5ff.nxv2i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg6ff.nxv2i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg6ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg7ff.nxv2i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg8ff.nxv2i32(i32* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(i8* %base, i64 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg3ff.nxv8i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg4ff.nxv8i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg5ff.nxv8i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg6ff.nxv8i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg7ff.nxv8i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg8ff.nxv8i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e64ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.nxv4i64(i64* %base, i64 %vl)
; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.nxv4i16(i16* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg3ff.nxv4i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg4ff.nxv4i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg4ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg5ff.nxv4i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg6ff.nxv4i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg7ff.nxv4i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg8ff.nxv4i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg2ff.nxv1i8(i8* %base, i64 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg3ff.nxv1i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg4ff.nxv1i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg5ff.nxv1i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg6ff.nxv1i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg7ff.nxv1i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg8ff.nxv1i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg2ff.nxv2i8(i8* %base, i64 %vl)
; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg3ff.nxv2i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg4ff.nxv2i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg5ff.nxv2i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg6ff.nxv2i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg7ff.nxv2i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg8ff.nxv2i8(i8* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i64} @llvm.riscv.vlseg2ff.nxv8i32(i32* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e8ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.nxv32i8(i8* %base, i64 %vl)
; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg2ff.nxv2i16(i16* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg3ff.nxv2i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg4ff.nxv2i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg5ff.nxv2i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg6ff.nxv2i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg7ff.nxv2i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg8ff.nxv2i16(i16* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg8ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.nxv2i64(i64* %base, i64 %vl)
; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg3ff.nxv2i64(i64* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg4ff.nxv2i64(i64* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg4ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i64} @llvm.riscv.vlseg2ff.nxv16f16(half* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e64ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i64} @llvm.riscv.vlseg2ff.nxv4f64(double* %base, i64 %vl)
; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg2ff.nxv1f64(double* %base, i64 %vl)
; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg3ff.nxv1f64(double* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg4ff.nxv1f64(double* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg5ff.nxv1f64(double* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg6ff.nxv1f64(double* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg7ff.nxv1f64(double* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e64ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg8ff.nxv1f64(double* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg2ff.nxv2f32(float* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg3ff.nxv2f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg3ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg4ff.nxv2f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg4ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg5ff.nxv2f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg6ff.nxv2f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg7ff.nxv2f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg8ff.nxv2f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg2ff.nxv1f16(half* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg3ff.nxv1f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg4ff.nxv1f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg5ff.nxv1f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg6ff.nxv1f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg7ff.nxv1f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg8ff.nxv1f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg2ff.nxv1f32(float* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg3ff.nxv1f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg4ff.nxv1f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg5ff.nxv1f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg6ff.nxv1f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg7ff.nxv1f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg8ff.nxv1f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg2ff.nxv8f16(half* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg3ff.nxv8f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg3ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg4ff.nxv8f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i64} @llvm.riscv.vlseg2ff.nxv8f32(float* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg2ff.nxv2f64(double* %base, i64 %vl)
; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg3ff.nxv2f64(double* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e64ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg4ff.nxv2f64(double* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg2ff.nxv4f16(half* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg3ff.nxv4f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg4ff.nxv4f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg4ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg5ff.nxv4f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg6ff.nxv4f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg7ff.nxv4f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg7ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg8ff.nxv4f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg2ff.nxv2f16(half* %base, i64 %vl)
; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg3ff.nxv2f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg3ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg4ff.nxv2f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg5ff.nxv2f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg6ff.nxv2f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg7ff.nxv2f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg7ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg8ff.nxv2f16(half* %base, i64 %vl)
; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg2ff.nxv4f32(float* %base, i64 %vl)
; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg3e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg3ff.nxv4f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vlseg4e32ff.v v6, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg4ff.nxv4f32(float* %base, i64 %vl)
; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a2)
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i32 %offset, i32 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i32 %offset, i32 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(i64* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(i64* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(i32* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(i8* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(i16* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(i64* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(i64* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(i64* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(double* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(double* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(double* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(double* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(double* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(half* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(float* %base, i64 %offset, i64 %vl)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(float* %base, i64 %offset, i64 %vl)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei16.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei16.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vluxseg6ei32.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei16.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei16.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vluxseg6ei32.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
-; CHECK-NEXT: vmv1r.v v12, v7
-; CHECK-NEXT: vmv1r.v v13, v7
-; CHECK-NEXT: vmv1r.v v14, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: ret
; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
-; CHECK-NEXT: vmv2r.v v12, v6
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei32.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v4, (a0), v16, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv4r.v v4, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei32.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
-; CHECK-NEXT: vmv1r.v v10, v7
-; CHECK-NEXT: vmv1r.v v11, v7
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vluxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv1r.v v8, v17
+; CHECK-NEXT: vluxseg6ei64.v v7, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: vmv1r.v v8, v17
; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv1r.v v7, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei32.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei8.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v10, v9
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: vmv1r.v v8, v11
+; CHECK-NEXT: vluxseg3ei16.v v7, (a0), v10, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v7, v8
-; CHECK-NEXT: vmv1r.v v9, v7
+; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8_v9
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v7, v8
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv1r.v v8, v13
+; CHECK-NEXT: vluxseg4ei64.v v7, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: vmv1r.v v8, v11
; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vluxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv1r.v v8, v13
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-NEXT: vmv2r.v v6, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei32.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei8.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v6, v8
-; CHECK-NEXT: vmv2r.v v10, v6
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg3ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT: vmv2r.v v8, v14
+; CHECK-NEXT: vluxseg3ei16.v v6, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v6, v8
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vluxseg4ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT: vmv2r.v v8, v18
+; CHECK-NEXT: vluxseg4ei64.v v6, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vluxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: vmv2r.v v8, v14
; RV32-NEXT: mv a3, a4
; RV32-NEXT: .LBB12_2:
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, mu
-; RV32-NEXT: vsext.vf4 v24, v10
+; RV32-NEXT: vsext.vf4 v16, v10
; RV32-NEXT: vsetvli zero, a3, e8, m2, ta, mu
-; RV32-NEXT: vluxei32.v v18, (a0), v24, v0.t
+; RV32-NEXT: vluxei32.v v10, (a0), v16, v0.t
; RV32-NEXT: bltu a1, a2, .LBB12_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB12_4:
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, mu
-; RV32-NEXT: vsext.vf4 v24, v8
+; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; RV32-NEXT: vmv1r.v v0, v12
-; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
-; RV32-NEXT: vmv4r.v v8, v16
+; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_baseidx_nxv32i8:
; RV64-NEXT: vsetvli t0, zero, e8, mf4, ta, mu
; RV64-NEXT: vslidedown.vx v0, v13, a6
; RV64-NEXT: vsetvli t0, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf8 v24, v11
+; RV64-NEXT: vsext.vf8 v16, v11
; RV64-NEXT: vsetvli zero, a7, e8, m1, ta, mu
-; RV64-NEXT: vluxei64.v v19, (a0), v24, v0.t
+; RV64-NEXT: vluxei64.v v11, (a0), v16, v0.t
; RV64-NEXT: bltu a1, a5, .LBB12_6
; RV64-NEXT: # %bb.5:
; RV64-NEXT: mv a1, a5
; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, mu
; RV64-NEXT: vslidedown.vx v0, v12, a6
; RV64-NEXT: vsetvli a5, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf8 v24, v9
+; RV64-NEXT: vsext.vf8 v16, v9
; RV64-NEXT: vsetvli zero, a4, e8, m1, ta, mu
-; RV64-NEXT: vluxei64.v v17, (a0), v24, v0.t
+; RV64-NEXT: vluxei64.v v9, (a0), v16, v0.t
; RV64-NEXT: bltu a1, a3, .LBB12_10
; RV64-NEXT: # %bb.9:
; RV64-NEXT: mv a1, a3
; RV64-NEXT: .LBB12_10:
; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf8 v24, v8
+; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
-; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
+; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: bltu a2, a3, .LBB12_12
; RV64-NEXT: # %bb.11:
; RV64-NEXT: mv a2, a3
; RV64-NEXT: .LBB12_12:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
-; RV64-NEXT: vsext.vf8 v24, v10
+; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v13
-; RV64-NEXT: vluxei64.v v18, (a0), v24, v0.t
-; RV64-NEXT: vmv4r.v v8, v16
+; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, i8* %base, <vscale x 32 x i8> %idxs
%v = call <vscale x 32 x i8> @llvm.vp.gather.nxv32i8.nxv32p0i8(<vscale x 32 x i8*> %ptrs, <vscale x 32 x i1> %m, i32 %evl)
; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, mu
; CHECK-NEXT: vfadd.vv v9, v8, v9
; CHECK-NEXT: vfmul.vv v8, v9, v8
+; CHECK-NEXT: # implicit-def: $x10
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB2_2: # %if.else
; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, mu
; CHECK-NEXT: vfsub.vv v9, v8, v9
; CHECK-NEXT: vfmul.vv v8, v9, v8
+; CHECK-NEXT: # implicit-def: $x10
; CHECK-NEXT: ret
entry:
%tobool = icmp eq i8 %cond, 0
define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsoxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsoxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsoxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsoxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsoxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsoxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsoxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsoxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsoxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsoxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsoxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t
define void @test_vsoxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsoxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsoxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t
define void @test_vsoxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsoxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl)
define void @test_vsoxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsoxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsoxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
define void @test_vsoxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
define void @test_vsoxseg2_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsoxseg2_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsoxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_nxv16f16_nxv16i8(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_nxv16f16_nxv16i32(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv4f64_nxv4i16(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_nxv4f64_nxv4i8(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_nxv4f64_nxv4i32(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8f32_nxv8i16(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_nxv8f32_nxv8i8(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_nxv8f32_nxv8i32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl)
define void @test_vsoxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsoxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsoxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsoxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsoxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsoxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsoxseg2_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
-; CHECK-NEXT: ret
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
+; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl)
ret void
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: ret
-entry:
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}
; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg3_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsoxseg4_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t
define void @test_vsoxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsoxseg2_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsoxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsoxseg3_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg5_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsoxseg5_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: ret
-entry:
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v10, v0.t
define void @test_vsoxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsoxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsoxseg2_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t
define void @test_vsoxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsoxseg3_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsoxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsoxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg5_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg5_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
define void @test_vsoxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg5ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg6_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg6_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg7_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg7_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg8_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg8_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_mask_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_mask_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_mask_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_mask_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t
define void @test_vsoxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg3_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg5_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsoxseg5_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsoxseg2_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsoxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsoxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsoxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsoxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsoxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsoxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsoxseg2_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsoxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsoxseg2_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
define void @test_vsoxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
define void @test_vsoxseg2_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_nxv16f16_nxv16i8(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsoxseg2_nxv16f16_nxv16i32(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv4f64_nxv4i32(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_nxv4f64_nxv4i8(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_nxv4f64_nxv4i64(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_mask_nxv4f64_nxv4i64(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_nxv4f64_nxv4i16(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsoxseg2_nxv1f64_nxv1i64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsoxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsoxseg2_nxv2f32_nxv2i64(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv2f32_nxv2i64(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1f16_nxv1i64(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsoxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv1f32_nxv1i64(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg2_nxv8f16_nxv8i64(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv8f16_nxv8i64(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg3_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsoxseg3_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv8f16_nxv8i64(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsoxseg4_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv8f32_nxv8i16(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_nxv8f32_nxv8i8(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_nxv8f32_nxv8i64(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16
define void @test_vsoxseg2_mask_nxv8f32_nxv8i64(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsoxseg2_nxv8f32_nxv8i32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsoxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_nxv2f64_nxv2i64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsoxseg2_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei32.v v8, (a0), v10, v0.t
define void @test_vsoxseg2_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg3_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsoxseg3_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg4_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsoxseg4_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg5_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsoxseg5_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsoxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg5ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsoxseg2_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10
define void @test_vsoxseg2_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsoxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg2_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12
define void @test_vsoxseg2_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsoxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsoxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsoxseg3_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsoxseg3_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsoxseg3ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsoxseg3ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsoxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg2_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsseg3_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsseg4_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsseg3_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsseg4_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsseg3_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsseg4_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsseg3_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsseg4_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsseg3_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsseg4_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsseg3_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsseg4_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsseg3_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsseg4_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg2_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsseg3_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsseg4_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsseg3_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsseg4_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg2_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsseg3_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsseg4_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsseg3_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsseg4_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsseg3_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsseg4_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv16f16(<vscale x 16 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv16f16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg2_nxv4f64(<vscale x 4 x double> %val, double* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv4f64(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg2_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg3_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsseg3_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsseg4_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsseg3_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsseg4_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsseg3_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsseg4_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsseg3_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsseg4_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsseg3_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsseg4_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv8f32(<vscale x 8 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg2_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg3_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsseg3_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsseg4_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsseg3_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsseg4_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsseg3_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsseg4_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsseg3_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsseg4_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg2_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsseg3_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsseg4_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsseg3_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsseg4_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsseg3_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsseg4_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsseg3_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsseg4_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsseg3_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsseg4_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsseg3_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsseg4_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsseg3_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsseg4_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsseg3_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsseg4_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsseg3_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsseg4_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv4i64(<vscale x 4 x i64> %val, i64* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv4i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg2_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsseg3_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsseg4_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsseg3_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsseg4_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsseg3_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsseg4_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg2_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0)
define void @test_vsseg2_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vsseg2e8.v v8, (a0), v0.t
define void @test_vsseg2_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsseg3_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsseg4_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg3_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsseg3_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsseg4_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv16f16(<vscale x 16 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv16f16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg2_nxv4f64(<vscale x 4 x double> %val, double* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv4f64(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg2_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg3_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsseg3_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsseg4_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsseg3_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsseg4_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsseg3_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsseg4_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsseg3_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsseg4_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsseg3_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsseg4_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv8f32(<vscale x 8 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv8f32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg2_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0)
define void @test_vsseg2_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsseg2e64.v v8, (a0), v0.t
define void @test_vsseg3_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsseg3_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsseg4_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg2_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsseg3_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsseg4_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vsseg3_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsseg3_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsseg4_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg4_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg5_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg5_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg5_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg6_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg6_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg6_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg7_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg7_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg7_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg8_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg8_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg8_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsseg2_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg2_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0)
define void @test_vsseg2_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg2_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsseg2e32.v v8, (a0), v0.t
define void @test_vsseg3_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg3_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsseg3_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg3_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsseg4_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %vl) {
; CHECK-LABEL: test_vsseg4_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsseg4_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsseg4_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
define void @test_vssseg3_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
define void @test_vssseg4_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
define void @test_vssseg3_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
define void @test_vssseg4_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
define void @test_vssseg3_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
define void @test_vssseg4_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
define void @test_vssseg3_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
define void @test_vssseg4_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
define void @test_vssseg3_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
define void @test_vssseg4_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
define void @test_vssseg3_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
define void @test_vssseg4_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
define void @test_vssseg3_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
define void @test_vssseg4_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
define void @test_vssseg3_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
define void @test_vssseg4_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
define void @test_vssseg3_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
define void @test_vssseg4_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i32 %offset, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
define void @test_vssseg3_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
define void @test_vssseg4_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
define void @test_vssseg3_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
define void @test_vssseg4_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
define void @test_vssseg3_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
define void @test_vssseg4_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv16f16(<vscale x 16 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16f16(<vscale x 16 x half> %val, half* %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv4f64(<vscale x 4 x double> %val, double* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4f64(<vscale x 4 x double> %val, double* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
define void @test_vssseg3_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
define void @test_vssseg4_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
define void @test_vssseg3_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
define void @test_vssseg4_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
define void @test_vssseg3_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
define void @test_vssseg4_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
define void @test_vssseg3_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
define void @test_vssseg4_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
define void @test_vssseg3_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
define void @test_vssseg4_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv8f32(<vscale x 8 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8f32(<vscale x 8 x float> %val, float* %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
define void @test_vssseg3_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
define void @test_vssseg4_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
define void @test_vssseg3_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
define void @test_vssseg4_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
define void @test_vssseg3_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
define void @test_vssseg4_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
define void @test_vssseg3_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
define void @test_vssseg4_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
define void @test_vssseg3_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
define void @test_vssseg4_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv4i32(<vscale x 4 x i32> %val, i32* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
define void @test_vssseg3_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
define void @test_vssseg4_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv16i8(<vscale x 16 x i8> %val, i8* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
define void @test_vssseg3_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
define void @test_vssseg4_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i64(<vscale x 1 x i64> %val, i64* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
define void @test_vssseg3_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
define void @test_vssseg4_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i32(<vscale x 1 x i32> %val, i32* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
define void @test_vssseg3_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
define void @test_vssseg4_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv8i16(<vscale x 8 x i16> %val, i16* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
define void @test_vssseg3_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
define void @test_vssseg4_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv4i8(<vscale x 4 x i8> %val, i8* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
define void @test_vssseg3_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
define void @test_vssseg4_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i16(<vscale x 1 x i16> %val, i16* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
define void @test_vssseg3_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
define void @test_vssseg4_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2i32(<vscale x 2 x i32> %val, i32* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
define void @test_vssseg3_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
define void @test_vssseg4_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv8i8(<vscale x 8 x i8> %val, i8* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv4i64(<vscale x 4 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i64(<vscale x 4 x i64> %val, i64* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
define void @test_vssseg3_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
define void @test_vssseg4_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv4i16(<vscale x 4 x i16> %val, i16* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
define void @test_vssseg3_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
define void @test_vssseg4_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1i8(<vscale x 1 x i8> %val, i8* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
define void @test_vssseg3_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
define void @test_vssseg4_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2i8(<vscale x 2 x i8> %val, i8* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8i32(<vscale x 8 x i32> %val, i32* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1
define void @test_vssseg2_mask_nxv32i8(<vscale x 32 x i8> %val, i8* %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT: vssseg2e8.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
define void @test_vssseg3_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
define void @test_vssseg4_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2i16(<vscale x 2 x i16> %val, i16* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
define void @test_vssseg3_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
define void @test_vssseg4_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv2i64(<vscale x 2 x i64> %val, i64* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv16f16(<vscale x 16 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16f16(<vscale x 16 x half> %val, half* %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv4f64(<vscale x 4 x double> %val, double* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4f64(<vscale x 4 x double> %val, double* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
define void @test_vssseg3_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
define void @test_vssseg4_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1f64(<vscale x 1 x double> %val, double* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
define void @test_vssseg3_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
define void @test_vssseg4_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2f32(<vscale x 2 x float> %val, float* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
define void @test_vssseg3_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
define void @test_vssseg4_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1f16(<vscale x 1 x half> %val, half* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
define void @test_vssseg3_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
define void @test_vssseg4_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv1f32(<vscale x 1 x float> %val, float* %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
define void @test_vssseg3_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
define void @test_vssseg4_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv8f16(<vscale x 8 x half> %val, half* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv8f32(<vscale x 8 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv8f32(<vscale x 8 x float> %val, float* %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg2_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vssseg2e64.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
define void @test_vssseg3_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
define void @test_vssseg4_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv2f64(<vscale x 2 x double> %val, double* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg2_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
define void @test_vssseg3_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
define void @test_vssseg4_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv4f16(<vscale x 4 x half> %val, half* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
define void @test_vssseg3_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
define void @test_vssseg4_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg4_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg5_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg5_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg5_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg6_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg6_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg6_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg7_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg7_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg7_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg8_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg8_mask_nxv2f16(<vscale x 2 x half> %val, half* %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg8_mask_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vssseg2_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg2_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1
define void @test_vssseg2_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg2_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
; CHECK-NEXT: vssseg2e32.v v8, (a0), a1, v0.t
define void @test_vssseg3_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg3_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
define void @test_vssseg3_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg3_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
define void @test_vssseg4_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %offset, i64 %vl) {
; CHECK-LABEL: test_vssseg4_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vssseg4_mask_nxv4f32(<vscale x 4 x float> %val, float* %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vssseg4_mask_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsuxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsuxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsuxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsuxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsuxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsuxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsuxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsuxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsuxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
define void @test_vsuxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsuxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsuxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
define void @test_vsuxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsuxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i32 %vl)
define void @test_vsuxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsuxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsuxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
define void @test_vsuxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
define void @test_vsuxseg2_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsuxseg2_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsuxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
entry:
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_nxv16f16_nxv16i8(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_nxv16f16_nxv16i32(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv4f64_nxv4i16(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_nxv4f64_nxv4i8(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_nxv4f64_nxv4i32(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8f32_nxv8i16(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_nxv8f32_nxv8i8(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_nxv8f32_nxv8i32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i32 %vl)
define void @test_vsuxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, i32* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsuxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsuxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsuxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
define void @test_vsuxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, i8* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg2_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
-; CHECK-NEXT: ret
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
+; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, i64* %base, <vscale x 1 x i8> %index, i64 %vl)
ret void
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT: ret
-entry:
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, i32* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}
; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg3_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg4_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, i16* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
define void @test_vsuxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsuxseg2_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsuxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
define void @test_vsuxseg3_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg5_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsuxseg5_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, i8* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT: ret
-entry:
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT: ret
+entry:
tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, i16* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
ret void
}
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, i32* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v10, v0.t
define void @test_vsuxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsuxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsuxseg2_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
define void @test_vsuxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei16.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsuxseg3_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsuxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
define void @test_vsuxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg5_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg5_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, i64 %vl)
define void @test_vsuxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg6_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg6_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg7_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg7_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg8_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg8_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, i8* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_mask_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_mask_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_mask_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_mask_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, i64* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
define void @test_vsuxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg3_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg5_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsuxseg5_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, i16* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsuxseg2_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsuxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsuxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsuxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsuxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsuxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
define void @test_vsuxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, i8* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsuxseg2_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsuxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
define void @test_vsuxseg2_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, i8* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, i32* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
define void @test_vsuxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, i8* %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
define void @test_vsuxseg2_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, i16* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, i64* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_nxv16f16_nxv16i8(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
define void @test_vsuxseg2_nxv16f16_nxv16i32(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, half* %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv4f64_nxv4i32(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_nxv4f64_nxv4i8(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_nxv4f64_nxv4i64(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_mask_nxv4f64_nxv4i64(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_nxv4f64_nxv4i16(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, double* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
define void @test_vsuxseg2_nxv1f64_nxv1i64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
define void @test_vsuxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, double* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
define void @test_vsuxseg2_nxv2f32_nxv2i64(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv2f32_nxv2i64(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, float* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1f16_nxv1i64(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
define void @test_vsuxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, half* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv1f32_nxv1i64(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, float* %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg2_nxv8f16_nxv8i64(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v12, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv8f16_nxv8i64(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg3_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
define void @test_vsuxseg3_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, half* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv8f16_nxv8i64(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
define void @test_vsuxseg4_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, half* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vmv2r.v v14, v8
; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv8f32_nxv8i16(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv2r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_nxv8f32_nxv8i8(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv1r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_nxv8f32_nxv8i64(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16
define void @test_vsuxseg2_mask_nxv8f32_nxv8i64(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v16, v0.t
define void @test_vsuxseg2_nxv8f32_nxv8i32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, float* %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
define void @test_vsuxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_nxv2f64_nxv2i64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
define void @test_vsuxseg2_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, double* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei32.v v8, (a0), v10, v0.t
define void @test_vsuxseg2_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg3_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei32.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
define void @test_vsuxseg3_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg4_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
define void @test_vsuxseg4_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9_v10_v11
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vmv1r.v v11, v8
; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg5_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsuxseg5_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv1r.v v10, v8
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsuxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg5ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, half* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v16, v8
-; CHECK-NEXT: vmv1r.v v17, v16
-; CHECK-NEXT: vmv1r.v v18, v16
-; CHECK-NEXT: vmv1r.v v19, v16
-; CHECK-NEXT: vmv1r.v v20, v16
-; CHECK-NEXT: vmv1r.v v21, v16
-; CHECK-NEXT: vmv1r.v v22, v16
-; CHECK-NEXT: vmv1r.v v23, v16
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
+; CHECK-NEXT: vmv1r.v v20, v8
+; CHECK-NEXT: vmv1r.v v21, v8
+; CHECK-NEXT: vmv1r.v v22, v8
+; CHECK-NEXT: vmv1r.v v23, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
; CHECK-NEXT: vmv1r.v v10, v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
define void @test_vsuxseg2_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10
define void @test_vsuxseg2_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8 killed $v8 def $v8_v9
; CHECK-NEXT: vmv1r.v v9, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v10, v0.t
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
+; CHECK-NEXT: vmv1r.v v9, v8
+; CHECK-NEXT: vmv2r.v v12, v10
+; CHECK-NEXT: vmv1r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v12, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, half* %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg5ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg6ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg7ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei32.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei8.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v8
-; CHECK-NEXT: vmv1r.v v11, v10
-; CHECK-NEXT: vmv1r.v v12, v10
-; CHECK-NEXT: vmv1r.v v13, v10
-; CHECK-NEXT: vmv1r.v v14, v10
-; CHECK-NEXT: vmv1r.v v15, v10
-; CHECK-NEXT: vmv1r.v v16, v10
-; CHECK-NEXT: vmv1r.v v17, v10
+; CHECK-NEXT: vmv1r.v v11, v8
+; CHECK-NEXT: vmv1r.v v12, v8
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei16.v v10, (a0), v9, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v12, v8
-; CHECK-NEXT: vmv1r.v v13, v12
-; CHECK-NEXT: vmv1r.v v14, v12
-; CHECK-NEXT: vmv1r.v v15, v12
-; CHECK-NEXT: vmv1r.v v16, v12
-; CHECK-NEXT: vmv1r.v v17, v12
-; CHECK-NEXT: vmv1r.v v18, v12
-; CHECK-NEXT: vmv1r.v v19, v12
+; CHECK-NEXT: vmv1r.v v13, v8
+; CHECK-NEXT: vmv1r.v v14, v8
+; CHECK-NEXT: vmv1r.v v15, v8
+; CHECK-NEXT: vmv1r.v v16, v8
+; CHECK-NEXT: vmv1r.v v17, v8
+; CHECK-NEXT: vmv1r.v v18, v8
+; CHECK-NEXT: vmv1r.v v19, v8
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vsuxseg8ei64.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg2_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12
define void @test_vsuxseg2_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg2ei64.v v8, (a0), v12, v0.t
define void @test_vsuxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
define void @test_vsuxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
; CHECK-NEXT: vmv1r.v v12, v10
; CHECK-NEXT: vmv2r.v v10, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
define void @test_vsuxseg3_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, i64 %vl)
define void @test_vsuxseg3_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
+; CHECK-NEXT: vmv2r.v v10, v8
+; CHECK-NEXT: vmv4r.v v16, v12
+; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsuxseg3ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT: vsuxseg3ei64.v v8, (a0), v16, v0.t
; CHECK-NEXT: ret
entry:
tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, float* %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg3ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei32.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei8.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v16, v8
-; CHECK-NEXT: vmv2r.v v18, v16
-; CHECK-NEXT: vmv2r.v v20, v16
-; CHECK-NEXT: vmv2r.v v22, v16
+; CHECK-NEXT: vmv2r.v v18, v8
+; CHECK-NEXT: vmv2r.v v20, v8
+; CHECK-NEXT: vmv2r.v v22, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei64.v v16, (a0), v12, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10
; CHECK-NEXT: ret
; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vmv2r.v v14, v12
-; CHECK-NEXT: vmv2r.v v16, v12
-; CHECK-NEXT: vmv2r.v v18, v12
+; CHECK-NEXT: vmv2r.v v14, v8
+; CHECK-NEXT: vmv2r.v v16, v8
+; CHECK-NEXT: vmv2r.v v18, v8
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vsuxseg4ei16.v v12, (a0), v10, v0.t
; CHECK-NEXT: ret
; CHECK-NEXT: vlseg2e16.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(i16* %base, i64 0)
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(i16* %base, i64 %offset, i64 0)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a1)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(i16* %base, i64 0)
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a1)
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
entry:
%0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask, i64 0, i64 1)
define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base) {
; CHECK-LABEL: test_vsseg2_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %offset) {
; CHECK-LABEL: test_vssseg2_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, i16* %base, i64 %offset, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index) {
; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
define void @test_vsuxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index) {
; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu
define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v16, v12
; CHECK-NEXT: vmv4r.v v12, v8
; CHECK-NEXT: vsetivli zero, 0, e16, m4, ta, mu