--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+; FIXME: The scalar/vector operations ('xv' and 'iv' tests) should swap
+; operands and condition codes accordingly in order to generate a 'vx' or 'vi'
+; instruction.
+
+; Test each operand arrangement of the 'eq' comparison on <vscale x 8 x i8>:
+; vector/vector (vv), vector/scalar (vx), scalar/vector (xv),
+; vector/immediate (vi), and immediate/vector (iv). Per the CHECK lines, the
+; xv case currently materializes the splat with vmv.v.x and uses vmseq.vv
+; instead of a single vmseq.vx (see the FIXME at the top of the file).
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmseq.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ne' comparison on <vscale x 8 x i8>: vv, vx, xv, and vi forms. As with
+; 'eq', the xv case splats the scalar (vmv.v.x) and falls back to vmsne.vv
+; per the CHECK lines.
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsne.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsne.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ugt' comparison on <vscale x 8 x i8>. There is no vmsgtu.vv, so the vv
+; form swaps operands into vmsltu.vv; the vx and vi forms use the dedicated
+; vmsgtu.vx / vmsgtu.vi encodings, and the xv form splats and uses vmsltu.vv.
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'uge' comparison on <vscale x 8 x i8>. There is no vmsgeu instruction, so
+; the vv form swaps operands into vmsleu.vv. The vi_* cases probe immediate
+; boundaries (-16, 15, 0, 1, -15, 16): 'x uge 0' folds to all-true (vmset.m),
+; out-of-immediate-range 16 goes through a scalar register, and the remaining
+; constants are currently splatted with vmv.v.i + vmsleu.vv.
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -16
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 15
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 1
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -15
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_5(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ult' comparison on <vscale x 8 x i8>, with immediate boundary cases
+; (-16, -15, 0, 1, 16). Notable folds in the CHECK lines: 'x ult 0' folds to
+; all-false (vmclr.m) and 'x ult 1' folds to 'x == 0' (vmseq.vi). vmsltu has
+; no immediate form, so constant comparands go through a scalar register
+; (addi + vmsltu.vx).
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsltu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ule' comparison on <vscale x 8 x i8>: vv, vx, xv, and vi forms, mapping
+; directly to vmsleu.vv / vmsleu.vx / vmsleu.vi (xv splats then uses vv).
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'sgt' (signed) comparison on <vscale x 8 x i8>. No vmsgt.vv exists, so the
+; vv form swaps operands into vmslt.vv; vx and vi use vmsgt.vx / vmsgt.vi,
+; and xv splats then uses vmslt.vv.
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'sge' (signed) comparison on <vscale x 8 x i8>. There is no vmsge
+; instruction, so the vv form swaps operands into vmsle.vv. The vi_* cases
+; probe immediates -16, -15, 0, and out-of-range 16 (which is materialized
+; via addi); in-range constants are currently splatted with vmv.v.i and
+; compared with vmsle.vv.
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -16
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -15
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'slt' (signed) comparison on <vscale x 8 x i8> with immediate boundary
+; cases (-16, -15, 0, 16). vmslt has no immediate form, so constants go
+; through a scalar register (addi + vmslt.vx); comparison against zero uses
+; the x0 register directly (vmslt.vx ..., zero). The iv case folds
+; '-15 slt x' into vmsgt.vi with the same immediate.
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmslt.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'sle' (signed) comparison on <vscale x 8 x i8>: vv, vx, xv, and vi forms,
+; mapping to vmsle.vv / vmsle.vx / vmsle.vi (xv splats then uses vv).
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Same 'eq' operand-arrangement coverage as above, but on <vscale x 8 x i16>
+; (SEW=16, LMUL=2, so register pairs v16/v18 and temporary v26 appear in the
+; CHECK lines instead of the m1 registers used for nxv8i8).
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmseq.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ne' comparison on <vscale x 8 x i16> (SEW=16, LMUL=2): vv, vx, xv, and
+; vi forms, mirroring the nxv8i8 'ne' tests above.
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsne.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsne.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ugt' comparisons for <vscale x 8 x i16>. There is no vmsgtu.vv, so the
+; vv form swaps operands and uses vmsltu.vv; vx/vi use vmsgtu directly.
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'uge' comparisons for <vscale x 8 x i16>. There is no vmsgeu instruction,
+; so non-trivial immediates/scalars are materialized (vmv.v.i / vmv.v.x) and
+; compared with vmsleu.vv with operands swapped. The _0.._5 variants probe
+; the simm5 immediate range boundaries (-16, -15, 0, 1, 15, 16).
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -16
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, 15
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splat on the LHS: splat uge va is va ule splat, so vmsleu.vi applies.
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; uge 0 is always true for unsigned values, so it folds to vmset.m.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, 1
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -15
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 16 is outside the simm5 range, so it is loaded into a GPR first.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_5(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ult' comparisons for <vscale x 8 x i16> (vmsltu). There is no vmsltu.vi,
+; so immediate variants go through a GPR (addi + vmsltu.vx) unless they can
+; be folded to another compare.
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsltu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splat on the LHS: splat ult va is va ugt splat, selecting vmsgtu.vi.
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; ult 0 is always false for unsigned values, so it folds to vmclr.m.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; ult 1 is equivalent to eq 0, so vmseq.vi is selected.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ule' comparisons for <vscale x 8 x i16> (vmsleu selection).
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'sgt' comparisons for <vscale x 8 x i16>. There is no vmsgt.vv, so the vv
+; form swaps operands and uses vmslt.vv; vx/vi use vmsgt directly.
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'sge' comparisons for <vscale x 8 x i16>. There is no vmsge instruction,
+; so scalar/immediate operands are materialized and compared with vmsle.vv
+; with operands swapped; the _0.._3 variants probe immediate boundaries.
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -16
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -15
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splat on the LHS: splat sge va is va sle splat, selecting vmsle.vi.
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, 0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 16 is outside the simm5 range, so it is loaded into a GPR first.
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'slt' comparisons for <vscale x 8 x i16> (vmslt). There is no vmslt.vi, so
+; immediate variants are loaded into a GPR and use vmslt.vx; comparing
+; against 0 uses the x0/zero register directly.
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmslt.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splat on the LHS: splat slt va is va sgt splat, selecting vmsgt.vi.
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; slt 0 compares against the zero register instead of an immediate load.
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'sle' comparisons for <vscale x 8 x i16> (vmsle selection).
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'eq' comparisons for <vscale x 8 x i32> (e32,m4 register grouping, so the
+; second vector operand lives in v20 and temporaries in v28).
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmseq.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splat on the LHS: eq is commutative, so a 'vi' form is still selected.
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ne' comparisons for <vscale x 8 x i32> (vmsne selection, e32/m4).
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsne.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsne.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ugt' comparisons for <vscale x 8 x i32>: vv swaps operands into
+; vmsltu.vv; vx/vi use vmsgtu directly.
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; i32 'uge' comparisons. Since no vmsgeu form is emitted here, uge with a
+; splat RHS materializes the splat (vmv.v.i / vmv.v.x) and uses vmsleu.vv
+; with the operands swapped. Edge-case immediates are covered below:
+; uge 0 is trivially true (vmset.m), and 16 does not fit vmv.v.i's
+; immediate so it goes through a scalar register.
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -16
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, 15
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splat on the LHS: splat(15) uge %va is %va ule 15, which folds to
+; vmsleu.vi.
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; uge 0 is always true for unsigned values, so the whole compare folds to
+; vmset.m.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, 1
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -15
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 16 is outside vmv.v.i's immediate range, so it is materialized in a
+; scalar register first.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_5(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; i32 'ult' comparisons: vv/vx map straight to vmsltu.vv/vmsltu.vx.
+; Immediates go through a scalar register (addi + vmsltu.vx); edge cases
+; fold: ult 0 is always false (vmclr.m) and ult 1 becomes an equality
+; test against zero (vmseq.vi).
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsltu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splat on the LHS: splat(-15) ult %va is %va ugt -15, which folds to
+; vmsgtu.vi.
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; ult 0 is always false for unsigned values, so this folds to vmclr.m.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; ult 1 is equivalent to eq 0, so this folds to vmseq.vi with immediate 0.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; i32 'ule' comparisons: vv/vx/vi map directly to vmsleu.vv/.vx/.vi.
+; The 'xv' form materializes the splat and swaps operands in vmsleu.vv
+; (see FIXME at top of file).
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; i32 'sgt' comparisons: signed counterparts of the ugt tests above. The
+; vv form compiles to vmslt.vv with swapped operands; vx/vi forms use
+; vmsgt.vx/vmsgt.vi directly.
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; i32 'sge' comparisons. Like uge, sge with a splat RHS is lowered by
+; materializing the splat (vmv.v.i / vmv.v.x) and using vmsle.vv with the
+; operands swapped; 16 is out of vmv.v.i's immediate range and goes
+; through a scalar register.
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -16
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -15
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splat on the LHS: splat(-15) sge %va is %va sle -15, which folds to
+; vmsle.vi.
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, 0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 16 is outside vmv.v.i's immediate range, so it is materialized in a
+; scalar register first.
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; i32 'slt' comparisons: vv/vx map directly to vmslt.vv/vmslt.vx.
+; Immediates are materialized in a scalar register (addi + vmslt.vx);
+; slt 0 uses the zero register directly.
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmslt.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splat on the LHS: splat(-15) slt %va is %va sgt -15, which folds to
+; vmsgt.vi.
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; Comparison against zero uses the x0/zero register as the scalar operand.
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; i32 'sle' comparisons: vv/vx/vi map directly to vmsle.vv/.vx/.vi.
+; The 'xv' form materializes the splat and swaps operands in vmsle.vv
+; (see FIXME at top of file).
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; i64 element tests on riscv32. The second vector operand of the vv forms
+; is passed indirectly and loaded with vle64.v. A scalar i64 arrives in
+; two 32-bit GPRs (a0/a1) and is reassembled into a splat using
+; vmv.v.x plus shift/or before the vector compare.
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmseq.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmseq.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmseq.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; eq is commutative, so the splat-on-LHS form folds to the same vmseq.vi.
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; i64 'ne' comparisons on riscv32: same operand-passing pattern as the eq
+; tests above (vv operand loaded via vle64.v; scalar i64 splat rebuilt
+; from two GPRs), selecting vmsne.
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsne.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsne.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsne.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; i64 'ugt' comparisons on riscv32. The vv form loads the second operand
+; with vle64.v and uses vmsltu.vv with swapped operands. The vx form
+; rebuilds the i64 splat from two GPRs and compares with vmsltu.vv
+; (swapped) rather than a single vx instruction; only the small immediate
+; case folds to vmsgtu.vi.
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsltu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsltu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsltu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vv' form: uge lowers to vmsleu.vv with the operands swapped. The second m8
+; vector argument is loaded from (a0) — presumably passed indirectly per ABI.
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vx' form: %va uge splat(%b). The i64 scalar is splatted from its two 32-bit
+; halves on RV32, then uge becomes vmsleu.vv with swapped operands.
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'xv' form: splat(%b) uge %va; same two-half splat materialization, compared
+; as vmsleu.vv %va, splat.
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; uge does not fold to an immediate compare here: splat(-16) is materialized
+; with vmv.v.i and compared via vmsleu.vv.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -16
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Same as the -16 case: splat(15) is materialized (vmv.v.i) and compared via
+; vmsleu.vv rather than using an immediate compare.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, 15
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'iv' form: splat(15) uge %va is equivalent to %va ule 15 and folds to
+; vmsleu.vi.
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; %va uge 0 is always true for unsigned values, so the whole compare folds to
+; vmset.m.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; splat(1) is materialized (vmv.v.i) and compared with vmsleu.vv; no immediate
+; form or ne-0 fold is used here.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, 1
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; splat(-15) is materialized (vmv.v.i) and compared via vmsleu.vv.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -15
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 16 is outside the vmv.v.i immediate range, so it is loaded into a0 and
+; splatted with vmv.v.x before the vmsleu.vv compare.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_5(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vv' form: direct vmsltu.vv; the second m8 argument is loaded from (a0).
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsltu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vx' form: %va ult splat(%b); RV32 builds the i64 splat from two 32-bit
+; halves, then compares with vmsltu.vv.
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsltu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'xv' form: splat(%b) ult %va (swapped operands; see FIXME at file top).
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsltu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; The constant -16 is moved into a0 and the compare uses vmsltu.vx.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Same pattern with -15: constant in a0, compare via vmsltu.vx.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'iv' form: splat(-15) ult %va is equivalent to %va ugt -15 and folds to
+; vmsgtu.vi.
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; %va ult 0 is always false for unsigned values, so this folds to vmclr.m.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; %va ult 1 folds to %va == 0, lowered as vmseq.vi with immediate 0.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 16 is placed in a0 and compared via vmsltu.vx.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vv' form: direct vmsleu.vv; second m8 argument loaded from (a0).
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vx' form: %va ule splat(%b); two-half i64 splat on RV32, then vmsleu.vv.
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'xv' form: splat(%b) ule %va (swapped operands; see FIXME at file top).
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vi' form: %va ule splat(5) folds directly to vmsleu.vi.
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vv' form: sgt lowers to vmslt.vv with swapped operands.
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmslt.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vx' form: %va sgt splat(%b); two-half i64 splat, then vmslt.vv swapped.
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmslt.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'xv' form: splat(%b) sgt %va (swapped operands; see FIXME at file top).
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmslt.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vi' form: %va sgt splat(5) folds directly to vmsgt.vi.
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vv' form: sge lowers to vmsle.vv with swapped operands.
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vx' form: %va sge splat(%b); two-half i64 splat, then vmsle.vv swapped.
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'xv' form: splat(%b) sge %va, compared as vmsle.vv %va, splat.
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; sge does not fold to an immediate compare: splat(-16) is materialized with
+; vmv.v.i and compared via vmsle.vv.
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -16
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; splat(-15) materialized with vmv.v.i, compared via vmsle.vv.
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -15
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'iv' form: splat(-15) sge %va is equivalent to %va sle -15, folding to
+; vmsle.vi.
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; %va sge 0: splat(0) materialized with vmv.v.i, compared via vmsle.vv.
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 16 is outside the vmv.v.i immediate range, so it is loaded into a0 and
+; splatted with vmv.v.x before the vmsle.vv compare.
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vv' form: direct vmslt.vv; second m8 argument loaded from (a0).
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmslt.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vx' form: %va slt splat(%b); two-half i64 splat on RV32, then vmslt.vv.
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmslt.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'xv' form: splat(%b) slt %va (swapped operands; see FIXME at file top).
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmslt.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; The constant -16 is moved into a0 and compared via vmslt.vx.
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Same pattern with -15: constant in a0, compare via vmslt.vx.
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'iv' form: splat(-15) slt %va is equivalent to %va sgt -15, folding to
+; vmsgt.vi.
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; Compare against zero uses the x0 register directly: vmslt.vx %va, zero.
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 16 is placed in a0 and compared via vmslt.vx.
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vv' form: direct vmsle.vv; second m8 argument loaded from (a0).
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vx' form: %va sle splat(%b); two-half i64 splat on RV32, then vmsle.vv.
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'xv' form: splat(%b) sle %va (swapped operands; see FIXME at file top).
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsll.vx v8, v8, a1
+; CHECK-NEXT: vmv.v.x v24, a0
+; CHECK-NEXT: vsll.vx v24, v24, a1
+; CHECK-NEXT: vsrl.vx v24, v24, a1
+; CHECK-NEXT: vor.vv v8, v24, v8
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vi' form: %va sle splat(5) folds directly to vmsle.vi.
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Check a setcc with two constant splats, which would previously get stuck in
+; an infinite loop. DAGCombine isn't clever enough to constant-fold
+; splat_vectors but could continuously swap the operands, trying to put the
+; splat on the RHS.
+; Both operands are constant splats (see the comment above): codegen
+; materializes splat(5) with vmv.v.i and compares it against immediate 2.
+define <vscale x 8 x i1> @icmp_eq_ii_nxv8i8() {
+; CHECK-LABEL: icmp_eq_ii_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 5
+; CHECK-NEXT: vmseq.vi v0, v25, 2
+; CHECK-NEXT: ret
+ %heada = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splata = shufflevector <vscale x 8 x i8> %heada, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %headb = insertelement <vscale x 8 x i8> undef, i8 2, i32 0
+ %splatb = shufflevector <vscale x 8 x i8> %headb, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %splata, %splatb
+ ret <vscale x 8 x i1> %vc
+}
+
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+; FIXME: The scalar/vector operations ('xv' and 'iv' tests) should swap
+; operands and condition codes accordingly in order to generate a 'vx' or 'vi'
+; instruction.
+
+; 'vv' form (RV64 file): direct vmseq.vv on the two m1 vector arguments.
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vx' form: the splatted scalar folds directly into vmseq.vx.
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'xv' form: the splat is materialized with vmv.v.x and compared via vmseq.vv
+; instead of folding to vmseq.vx; see FIXME at file top.
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmseq.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vi' form: compare against splat(0) folds to vmseq.vi with immediate 0.
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vi' form: compare against splat(5) folds to vmseq.vi.
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'iv' form: eq is commutative, so splat(5) == %va still folds to vmseq.vi.
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vv' form: direct vmsne.vv.
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsne.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vx' form: the splatted scalar folds directly into vmsne.vx.
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'xv' form: splat materialized (vmv.v.x) and compared via vmsne.vv; see FIXME
+; at file top.
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsne.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vi' form: compare against splat(5) folds to vmsne.vi.
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vv' form: ugt lowers to vmsltu.vv with swapped operands.
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'vx' form: %va ugt splat(%b) folds directly to vmsgtu.vx.
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'uge' comparisons, nxv8i8: emitted as vmsleu with the operands swapped
+; (presumably because there is no vmsgeu encoding — verify against the RVV
+; spec).  Note the immediate cases splat the constant into a register and
+; use vmsleu.vv rather than folding to an immediate compare.
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -16
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 15
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; x uge 0 is always true for unsigned, so this folds to setting the mask.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 1
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -15
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 16 is outside the 5-bit vmv.v.i immediate range, so it is materialized
+; with addi and splatted via vmv.v.x.
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_5(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ult' comparisons, nxv8i8.  Constants go through a scalar register and
+; vmsltu.vx (presumably because vmsltu has no .vi form — verify against the
+; RVV spec).  Degenerate immediates fold: x ult 0 is always false (vmclr.m)
+; and x ult 1 becomes x == 0 (vmseq.vi).
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsltu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; splat ult x, i.e. x ugt splat, so this uses the immediate vmsgtu.vi form.
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; x ult 0 is always false for unsigned, so this folds to clearing the mask.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; x ult 1 (unsigned) is equivalent to x == 0.
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ule' comparisons, nxv8i8: maps directly onto vmsleu.vv/.vx/.vi.
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsleu.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'sgt' comparisons, nxv8i8: vv form uses vmslt.vv with operands swapped;
+; vx/vi forms use the dedicated vmsgt.vx / vmsgt.vi encodings.
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'sge' comparisons, nxv8i8: emitted as vmsle with the operands swapped
+; (presumably because there is no vmsge encoding — verify against the RVV
+; spec).  Immediates are splatted into a register rather than folded.
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v17, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v25
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -16
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, -15
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; splat sge x, i.e. x sle splat, so the immediate vmsle.vi form applies.
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.i v25, 0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 16 is outside the 5-bit vmv.v.i range, so it is materialized via addi.
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'slt' comparisons, nxv8i8.  Constants go through a scalar register and
+; vmslt.vx (presumably because vmslt has no .vi form — verify against the
+; RVV spec); slt 0 uses the x0/zero register directly.
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmslt.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; splat slt x, i.e. x sgt splat, so the immediate vmsgt.vi form applies.
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; slt 0 compares against the hardwired zero register — no addi needed.
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'sle' comparisons, nxv8i8: maps directly onto vmsle.vv/.vx/.vi.
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v16, v17
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i8> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmv.v.x v25, a0
+; CHECK-NEXT: vmsle.vv v0, v25, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i8> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i8_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+ %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i8> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'eq' comparisons, nxv8i16 (e16, LMUL=2, so register pairs v16/v18 and
+; temporary v26): vmseq.vv/.vx/.vi; 'iv' folds like 'vi' since eq commutes.
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmseq.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ne' comparisons, nxv8i16: vmsne.vv/.vx/.vi, mirroring the i8 cases.
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsne.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsne.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'ugt' comparisons, nxv8i16: swapped vmsltu.vv for vv/xv; vmsgtu.vx/.vi
+; for vx/vi, mirroring the i8 cases.
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 'uge' comparisons, nxv8i16: swapped-operand vmsleu, mirroring the i8
+; cases; x uge 0 again folds to vmset.m.
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -16
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, 15
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; Always-true fold: emits vmset.m (note the vsetvli here selects e8,m1,
+; not e16,m2 — the element width is irrelevant for a full mask write).
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, 1
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -15
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_5(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsltu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsleu.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v18, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v26
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -16
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, -15
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.i v26, 0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmslt.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i16_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v16, v18
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i16> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmv.v.x v26, a0
+; CHECK-NEXT: vmsle.vv v0, v26, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i16> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i16_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+ %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i16> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmseq.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsne.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsne.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -16
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, 15
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, 1
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -15
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_5(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsltu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsleu.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v20, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v28
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -16
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, -15
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.i v28, 0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmslt.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i32_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i32_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vv v0, v16, v20
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i32> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmv.v.x v28, a0
+; CHECK-NEXT: vmsle.vv v0, v28, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i32> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i32_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+ %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i32> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmseq.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp eq <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmseq.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp eq <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsne.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp ne <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsne.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsne.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsne.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ne <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsltu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp ugt <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgtu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsltu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ugt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp uge <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -16
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, 15
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmset.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, 1
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -15
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_5(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp uge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsltu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp ult <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsltu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgtu.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmseq.vi v0, v16, 0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 1, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsltu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ult <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Unsigned less-or-equal ('ule') tests; vmsleu maps onto it directly.
+; The second m8 vector operand is passed indirectly and reloaded via vle64.v.
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsleu.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp ule <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splatted scalar on the RHS selects vmsleu.vx.
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splat on the LHS: materialized with vmv.v.x and compared with vmsleu.vv
+; (see the file-level FIXME about scalar-on-the-left comparisons).
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsleu.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 5 fits in a simm5, so the immediate form vmsleu.vi is used.
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsleu.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp ule <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Signed greater-than ('sgt') tests. There is no vmsgt.vv, so the vv case
+; is lowered as vmslt.vv with the operands swapped.
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmslt.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp sgt <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; vmsgt.vx does exist, so the scalar-on-the-right case uses it directly.
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; splat sgt %va: splat is materialized and compared as %va slt splat.
+; Per the file-level FIXME this could simply be vmslt.vx.
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmslt.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 5 fits in a simm5, so vmsgt.vi is used.
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Signed greater-or-equal ('sge') tests. There is no vmsge instruction,
+; so sge is lowered as vmsle with the operands swapped.
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %vc = icmp sge <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; No vmsge.vx form exists either, so the scalar is splatted with vmv.v.x
+; and compared with a swapped vmsle.vv.
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; splat sge %va becomes %va sle splat; per the file-level FIXME this could
+; use vmsle.vx instead of splat + vmsle.vv.
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; -16 fits in a simm5, so the splat uses vmv.v.i; the compare still needs
+; a swapped vmsle.vv because vmsge has no immediate form.
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -16
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; As above, with -15.
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, -15
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; splat(-15) sge %va is rewritten as %va sle -15, which has an immediate
+; form (vmsle.vi).
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; sge 0: splat of zero via vmv.v.i plus swapped vmsle.vv.
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; 16 is outside the simm5 range [-16, 15], so it is built in a0 and
+; splatted with vmv.v.x before the swapped vmsle.vv.
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sge <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Signed less-than ('slt') tests; vmslt maps onto it directly.
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmslt.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp slt <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splatted scalar on the RHS selects vmslt.vx.
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splat on the LHS: materialized with vmv.v.x then vmslt.vv. Per the
+; file-level FIXME this could swap and use vmsgt.vx.
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmslt.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; There is no vmslt.vi encoding, so -16 is moved into a0 and vmslt.vx is
+; used, even though -16 would fit in a simm5 immediate.
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; As above, with -15.
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, -15
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; splat(-15) slt %va is rewritten as %va sgt -15, which has an immediate
+; form (vmsgt.vi).
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i64_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v16, -15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; slt 0 compares against the x0/zero register, so no addi is needed.
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, zero
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; slt 16: no vmslt.vi, so 16 is materialized into a0.
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, zero, 16
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmslt.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp slt <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Signed less-or-equal ('sle') tests; vmsle maps onto it directly.
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vle64.v v8, (a0)
+; CHECK-NEXT: vmsle.vv v0, v16, v8
+; CHECK-NEXT: ret
+ %vc = icmp sle <vscale x 8 x i64> %va, %vb
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splatted scalar on the RHS selects vmsle.vx.
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vx v0, v16, a0
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+
+; Splat on the LHS: materialized with vmv.v.x and compared with vmsle.vv
+; (see the file-level FIXME about scalar-on-the-left comparisons).
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmsle.vv v0, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i64> %splat, %va
+ ret <vscale x 8 x i1> %vc
+}
+
+; 5 fits in a simm5, so vmsle.vi is used.
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i64_0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vmsle.vi v0, v16, 5
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+ %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+ %vc = icmp sle <vscale x 8 x i64> %va, %splat
+ ret <vscale x 8 x i1> %vc
+}
+