--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d \
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
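+
+; vp.ctpop has no single-instruction lowering here, so it is expanded to the
+; classic SWAR popcount:
+;   x = x - ((x >> 1) & 0x55...);
+;   x = (x & 0x33...) + ((x >> 2) & 0x33...);
+;   x = (x + (x >> 4)) & 0x0f...;
+; For e8 that last step already yields the count (the final vand.vi with 15);
+; wider elements also multiply by 0x0101... and shift the byte-sum down.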
+
+declare <vscale x 1 x i8> @llvm.vp.ctpop.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i8> @vp_ctpop_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 1 x i8> @llvm.vp.ctpop.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
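+; The _unmasked variants pass an all-true mask splat and expect the same
+; expansion without the v0.t predication.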
+define <vscale x 1 x i8> @vp_ctpop_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv1i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i8> @llvm.vp.ctpop.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i8> %v
+}
+
+declare <vscale x 2 x i8> @llvm.vp.ctpop.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i8> @vp_ctpop_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 2 x i8> @llvm.vp.ctpop.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+define <vscale x 2 x i8> @vp_ctpop_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i8> @llvm.vp.ctpop.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i8> %v
+}
+
+declare <vscale x 4 x i8> @llvm.vp.ctpop.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i8> @vp_ctpop_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 4 x i8> @llvm.vp.ctpop.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+define <vscale x 4 x i8> @vp_ctpop_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i8> @llvm.vp.ctpop.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i8> %v
+}
+
+declare <vscale x 8 x i8> @llvm.vp.ctpop.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i8> @vp_ctpop_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 8 x i8> @llvm.vp.ctpop.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+define <vscale x 8 x i8> @vp_ctpop_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i8> @llvm.vp.ctpop.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i8> %v
+}
+
+declare <vscale x 16 x i8> @llvm.vp.ctpop.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i8> @vp_ctpop_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vsrl.vi v10, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v10, v10, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v10, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v10, v8, v0.t
+; CHECK-NEXT: vsrl.vi v10, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 16 x i8> @llvm.vp.ctpop.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+define <vscale x 16 x i8> @vp_ctpop_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT: vsrl.vi v10, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v10, v10, a0
+; CHECK-NEXT: vsub.vv v8, v8, v10
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v10, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v10, v8
+; CHECK-NEXT: vsrl.vi v10, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v10
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i8> @llvm.vp.ctpop.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i8> %v
+}
+
+declare <vscale x 32 x i8> @llvm.vp.ctpop.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i8> @vp_ctpop_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vsrl.vi v12, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v12, v12, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v12, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v12, v8, v0.t
+; CHECK-NEXT: vsrl.vi v12, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 32 x i8> @llvm.vp.ctpop.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+define <vscale x 32 x i8> @vp_ctpop_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv32i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT: vsrl.vi v12, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v12, v12, a0
+; CHECK-NEXT: vsub.vv v8, v8, v12
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v12, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v12, v8
+; CHECK-NEXT: vsrl.vi v12, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v12
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i8> @llvm.vp.ctpop.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i8> %v
+}
+
+declare <vscale x 64 x i8> @llvm.vp.ctpop.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i1>, i32)
+
+define <vscale x 64 x i8> @vp_ctpop_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv64i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v16, v16, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v16, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 64 x i8> @llvm.vp.ctpop.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
+define <vscale x 64 x i8> @vp_ctpop_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_nxv64i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
+; CHECK-NEXT: vsrl.vi v16, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v16, v16, a0
+; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v16, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v16, v8
+; CHECK-NEXT: vsrl.vi v16, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v16
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 64 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> poison, <vscale x 64 x i32> zeroinitializer
+ %v = call <vscale x 64 x i8> @llvm.vp.ctpop.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
+ ret <vscale x 64 x i8> %v
+}
+
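+; For e16 the masks become 0x5555, 0x3333, and 0x0f0f (lui+addi, addiw on
+; RV64, which is why the checks split into RV32/RV64 from here on), and the
+; count is collected by multiplying with 0x0101 (li 257) and shifting right 8.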
+declare <vscale x 1 x i16> @llvm.vp.ctpop.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i16> @vp_ctpop_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv1i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv1i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 1 x i16> @llvm.vp.ctpop.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+define <vscale x 1 x i16> @vp_ctpop_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv1i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv1i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i16> @llvm.vp.ctpop.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i16> %v
+}
+
+declare <vscale x 2 x i16> @llvm.vp.ctpop.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i16> @vp_ctpop_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv2i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv2i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 2 x i16> @llvm.vp.ctpop.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+define <vscale x 2 x i16> @vp_ctpop_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv2i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv2i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i16> @llvm.vp.ctpop.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i16> %v
+}
+
+declare <vscale x 4 x i16> @llvm.vp.ctpop.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i16> @vp_ctpop_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv4i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv4i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 4 x i16> @llvm.vp.ctpop.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+define <vscale x 4 x i16> @vp_ctpop_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv4i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv4i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i16> @llvm.vp.ctpop.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i16> %v
+}
+
+declare <vscale x 8 x i16> @llvm.vp.ctpop.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i16> @vp_ctpop_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv8i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v10, v10, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v10, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv8i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v10, v10, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v10, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 8 x i16> @llvm.vp.ctpop.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+define <vscale x 8 x i16> @vp_ctpop_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv8i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v10, v10, a0
+; RV32-NEXT: vsub.vv v8, v8, v10
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v10, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v10, v8
+; RV32-NEXT: vsrl.vi v10, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv8i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; RV64-NEXT: vsrl.vi v10, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v10, v10, a0
+; RV64-NEXT: vsub.vv v8, v8, v10
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v10, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v10, v8
+; RV64-NEXT: vsrl.vi v10, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i16> @llvm.vp.ctpop.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i16> %v
+}
+
+declare <vscale x 16 x i16> @llvm.vp.ctpop.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i16> @vp_ctpop_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv16i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v12, v12, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v12, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv16i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v12, v12, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v12, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 16 x i16> @llvm.vp.ctpop.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+define <vscale x 16 x i16> @vp_ctpop_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv16i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v12, v12, a0
+; RV32-NEXT: vsub.vv v8, v8, v12
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v12, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v12, v8
+; RV32-NEXT: vsrl.vi v12, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v12
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv16i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; RV64-NEXT: vsrl.vi v12, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v12, v12, a0
+; RV64-NEXT: vsub.vv v8, v8, v12
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v12, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v12, v8
+; RV64-NEXT: vsrl.vi v12, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i16> @llvm.vp.ctpop.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i16> %v
+}
+
+declare <vscale x 32 x i16> @llvm.vp.ctpop.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i1>, i32)
+
+define <vscale x 32 x i16> @vp_ctpop_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv32i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v16, v16, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v16, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv32i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v16, v16, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v16, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 32 x i16> @llvm.vp.ctpop.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
+define <vscale x 32 x i16> @vp_ctpop_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv32i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v16, v16, a0
+; RV32-NEXT: vsub.vv v8, v8, v16
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v16, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v16, v8
+; RV32-NEXT: vsrl.vi v16, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv32i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; RV64-NEXT: vsrl.vi v16, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v16, v16, a0
+; RV64-NEXT: vsub.vv v8, v8, v16
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v16, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v16, v8
+; RV64-NEXT: vsrl.vi v16, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v16
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
+ %v = call <vscale x 32 x i16> @llvm.vp.ctpop.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
+ ret <vscale x 32 x i16> %v
+}
+
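+; For e32 the same expansion uses 0x55555555, 0x33333333, and 0x0f0f0f0f,
+; with a final multiply by 0x01010101 and a shift right of 24.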
+declare <vscale x 1 x i32> @llvm.vp.ctpop.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i32> @vp_ctpop_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv1i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv1i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 1 x i32> @llvm.vp.ctpop.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+define <vscale x 1 x i32> @vp_ctpop_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv1i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv1i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i32> @llvm.vp.ctpop.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i32> %v
+}
+
+declare <vscale x 2 x i32> @llvm.vp.ctpop.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+
+define <vscale x 2 x i32> @vp_ctpop_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv2i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv2i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 2 x i32> @llvm.vp.ctpop.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+define <vscale x 2 x i32> @vp_ctpop_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv2i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv2i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i32> @llvm.vp.ctpop.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i32> %v
+}
+
+declare <vscale x 4 x i32> @llvm.vp.ctpop.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i32> @vp_ctpop_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv4i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v10, v10, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v10, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv4i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v10, v10, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v10, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 4 x i32> @llvm.vp.ctpop.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+define <vscale x 4 x i32> @vp_ctpop_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv4i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v10, v10, a0
+; RV32-NEXT: vsub.vv v8, v8, v10
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v10, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v10, v8
+; RV32-NEXT: vsrl.vi v10, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv4i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; RV64-NEXT: vsrl.vi v10, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v10, v10, a0
+; RV64-NEXT: vsub.vv v8, v8, v10
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v10, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v10, v8
+; RV64-NEXT: vsrl.vi v10, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i32> @llvm.vp.ctpop.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i32> %v
+}
+
+declare <vscale x 8 x i32> @llvm.vp.ctpop.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i32> @vp_ctpop_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv8i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v12, v12, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v12, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv8i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v12, v12, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v12, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 8 x i32> @llvm.vp.ctpop.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+define <vscale x 8 x i32> @vp_ctpop_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv8i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v12, v12, a0
+; RV32-NEXT: vsub.vv v8, v8, v12
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v12, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v12, v8
+; RV32-NEXT: vsrl.vi v12, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v12
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv8i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; RV64-NEXT: vsrl.vi v12, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v12, v12, a0
+; RV64-NEXT: vsub.vv v8, v8, v12
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v12, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v12, v8
+; RV64-NEXT: vsrl.vi v12, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i32> @llvm.vp.ctpop.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i32> %v
+}
+
+declare <vscale x 16 x i32> @llvm.vp.ctpop.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+
+define <vscale x 16 x i32> @vp_ctpop_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv16i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v16, v16, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v16, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv16i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v16, v16, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v16, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 16 x i32> @llvm.vp.ctpop.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
+define <vscale x 16 x i32> @vp_ctpop_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv16i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v16, v16, a0
+; RV32-NEXT: vsub.vv v8, v8, v16
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v16, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v16, v8
+; RV32-NEXT: vsrl.vi v16, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv16i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; RV64-NEXT: vsrl.vi v16, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v16, v16, a0
+; RV64-NEXT: vsub.vv v8, v8, v16
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v16, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v16, v8
+; RV64-NEXT: vsrl.vi v16, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v16
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i32> @llvm.vp.ctpop.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i32> %v
+}
+
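+; For e64 the masks do not fit a 32-bit scalar register, so RV32 builds each
+; 64-bit splat by storing the 32-bit pattern to both halves of a stack slot
+; and broadcasting it with a zero-stride vlse64, while RV64 loads the
+; constants from the constant pool. The final shift amount (56) exceeds the
+; 5-bit vsrl.vi immediate, so it goes through vsrl.vx.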
+declare <vscale x 1 x i64> @llvm.vp.ctpop.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+
+define <vscale x 1 x i64> @vp_ctpop_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv1i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v9, v9, v10, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a2, 209715
+; RV32-NEXT: addi a2, a2, 819
+; RV32-NEXT: sw a2, 12(sp)
+; RV32-NEXT: sw a2, 8(sp)
+; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v10, v8, v9, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v9, v0.t
+; RV32-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a2, 61681
+; RV32-NEXT: addi a2, a2, -241
+; RV32-NEXT: sw a2, 12(sp)
+; RV32-NEXT: sw a2, 8(sp)
+; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a2, 4112
+; RV32-NEXT: addi a2, a2, 257
+; RV32-NEXT: sw a2, 12(sp)
+; RV32-NEXT: sw a2, 8(sp)
+; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v9, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv1i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI36_0)
+; RV64-NEXT: ld a0, %lo(.LCPI36_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI36_1)
+; RV64-NEXT: ld a1, %lo(.LCPI36_1)(a1)
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: vand.vx v9, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI36_2)
+; RV64-NEXT: ld a0, %lo(.LCPI36_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI36_3)
+; RV64-NEXT: ld a1, %lo(.LCPI36_3)(a1)
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 1 x i64> @llvm.vp.ctpop.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+define <vscale x 1 x i64> @vp_ctpop_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv1i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v9, v9, v10
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v10, v8, v9
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v9
+; RV32-NEXT: vadd.vv v8, v10, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v9
+; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v9
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv1i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI37_0)
+; RV64-NEXT: ld a0, %lo(.LCPI37_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI37_1)
+; RV64-NEXT: ld a1, %lo(.LCPI37_1)(a1)
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: vand.vx v9, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: lui a0, %hi(.LCPI37_2)
+; RV64-NEXT: ld a0, %lo(.LCPI37_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI37_3)
+; RV64-NEXT: ld a1, %lo(.LCPI37_3)(a1)
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
+ %v = call <vscale x 1 x i64> @llvm.vp.ctpop.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
+ ret <vscale x 1 x i64> %v
+}
+
+declare <vscale x 2 x i64> @llvm.vp.ctpop.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+
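+; From here the same expansion repeats at growing register-group sizes:
+; nxv2i64 works in m2 groups (v8/v10/v12), nxv4i64 in m4 (v8/v12/v16),
+; and nxv8i64 in m8 (v8/v16/v24); the instruction sequence itself is
+; unchanged.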
+define <vscale x 2 x i64> @vp_ctpop_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v10, v10, v12, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
+; RV32-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
+; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v10, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI38_0)
+; RV64-NEXT: ld a0, %lo(.LCPI38_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI38_1)
+; RV64-NEXT: ld a1, %lo(.LCPI38_1)(a1)
+; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT: vand.vx v10, v10, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV64-NEXT: vand.vx v10, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI38_2)
+; RV64-NEXT: ld a0, %lo(.LCPI38_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI38_3)
+; RV64-NEXT: ld a1, %lo(.LCPI38_3)(a1)
+; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 2 x i64> @llvm.vp.ctpop.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+define <vscale x 2 x i64> @vp_ctpop_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v10, v10, v12
+; RV32-NEXT: vsub.vv v8, v8, v10
+; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v12, v8, v10
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v10
+; RV32-NEXT: vadd.vv v8, v12, v8
+; RV32-NEXT: vsrl.vi v10, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v10
+; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v10
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI39_0)
+; RV64-NEXT: ld a0, %lo(.LCPI39_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI39_1)
+; RV64-NEXT: ld a1, %lo(.LCPI39_1)(a1)
+; RV64-NEXT: vsrl.vi v10, v8, 1
+; RV64-NEXT: vand.vx v10, v10, a0
+; RV64-NEXT: vsub.vv v8, v8, v10
+; RV64-NEXT: vand.vx v10, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v10, v8
+; RV64-NEXT: lui a0, %hi(.LCPI39_2)
+; RV64-NEXT: ld a0, %lo(.LCPI39_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI39_3)
+; RV64-NEXT: ld a1, %lo(.LCPI39_3)(a1)
+; RV64-NEXT: vsrl.vi v10, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+ %v = call <vscale x 2 x i64> @llvm.vp.ctpop.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl)
+ ret <vscale x 2 x i64> %v
+}
+
+declare <vscale x 4 x i64> @llvm.vp.ctpop.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x i64> @vp_ctpop_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v12, v12, v16, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v16, v8, v12, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
+; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v12, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI40_0)
+; RV64-NEXT: ld a0, %lo(.LCPI40_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI40_1)
+; RV64-NEXT: ld a1, %lo(.LCPI40_1)(a1)
+; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV64-NEXT: vand.vx v12, v12, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV64-NEXT: vand.vx v12, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI40_2)
+; RV64-NEXT: ld a0, %lo(.LCPI40_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI40_3)
+; RV64-NEXT: ld a1, %lo(.LCPI40_3)(a1)
+; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 4 x i64> @llvm.vp.ctpop.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+define <vscale x 4 x i64> @vp_ctpop_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v12, v12, v16
+; RV32-NEXT: vsub.vv v8, v8, v12
+; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v16, v8, v12
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v12
+; RV32-NEXT: vadd.vv v8, v16, v8
+; RV32-NEXT: vsrl.vi v12, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v12
+; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v12
+; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v12
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI41_0)
+; RV64-NEXT: ld a0, %lo(.LCPI41_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI41_1)
+; RV64-NEXT: ld a1, %lo(.LCPI41_1)(a1)
+; RV64-NEXT: vsrl.vi v12, v8, 1
+; RV64-NEXT: vand.vx v12, v12, a0
+; RV64-NEXT: vsub.vv v8, v8, v12
+; RV64-NEXT: vand.vx v12, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v12, v8
+; RV64-NEXT: lui a0, %hi(.LCPI41_2)
+; RV64-NEXT: ld a0, %lo(.LCPI41_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI41_3)
+; RV64-NEXT: ld a1, %lo(.LCPI41_3)(a1)
+; RV64-NEXT: vsrl.vi v12, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+ %v = call <vscale x 4 x i64> @llvm.vp.ctpop.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl)
+ ret <vscale x 4 x i64> %v
+}
+
+declare <vscale x 7 x i64> @llvm.vp.ctpop.nxv7i64(<vscale x 7 x i64>, <vscale x 7 x i1>, i32)
+
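+; <vscale x 7 x i64> has a non-power-of-two element count and is legalized
+; into the containing m8 register group, so the output below is expected
+; to match the nxv8i64 versions apart from the constant-pool label
+; numbers.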
+define <vscale x 7 x i64> @vp_ctpop_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv7i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv7i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI42_0)
+; RV64-NEXT: ld a0, %lo(.LCPI42_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI42_1)
+; RV64-NEXT: ld a1, %lo(.LCPI42_1)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT: vand.vx v16, v16, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v16, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI42_2)
+; RV64-NEXT: ld a0, %lo(.LCPI42_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI42_3)
+; RV64-NEXT: ld a1, %lo(.LCPI42_3)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 7 x i64> @llvm.vp.ctpop.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 %evl)
+ ret <vscale x 7 x i64> %v
+}
+
+define <vscale x 7 x i64> @vp_ctpop_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv7i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsub.vv v8, v8, v16
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vadd.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v16, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv7i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI43_0)
+; RV64-NEXT: ld a0, %lo(.LCPI43_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI43_1)
+; RV64-NEXT: ld a1, %lo(.LCPI43_1)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 1
+; RV64-NEXT: vand.vx v16, v16, a0
+; RV64-NEXT: vsub.vv v8, v8, v16
+; RV64-NEXT: vand.vx v16, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v16, v8
+; RV64-NEXT: lui a0, %hi(.LCPI43_2)
+; RV64-NEXT: ld a0, %lo(.LCPI43_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI43_3)
+; RV64-NEXT: ld a1, %lo(.LCPI43_3)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v16
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
+ %v = call <vscale x 7 x i64> @llvm.vp.ctpop.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 %evl)
+ ret <vscale x 7 x i64> %v
+}
+
+declare <vscale x 8 x i64> @llvm.vp.ctpop.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+
+define <vscale x 8 x i64> @vp_ctpop_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI44_0)
+; RV64-NEXT: ld a0, %lo(.LCPI44_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI44_1)
+; RV64-NEXT: ld a1, %lo(.LCPI44_1)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT: vand.vx v16, v16, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v16, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI44_2)
+; RV64-NEXT: ld a0, %lo(.LCPI44_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI44_3)
+; RV64-NEXT: ld a1, %lo(.LCPI44_3)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <vscale x 8 x i64> @llvm.vp.ctpop.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+define <vscale x 8 x i64> @vp_ctpop_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsub.vv v8, v8, v16
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vadd.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v16, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI45_0)
+; RV64-NEXT: ld a0, %lo(.LCPI45_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI45_1)
+; RV64-NEXT: ld a1, %lo(.LCPI45_1)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 1
+; RV64-NEXT: vand.vx v16, v16, a0
+; RV64-NEXT: vsub.vv v8, v8, v16
+; RV64-NEXT: vand.vx v16, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v16, v8
+; RV64-NEXT: lui a0, %hi(.LCPI45_2)
+; RV64-NEXT: ld a0, %lo(.LCPI45_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI45_3)
+; RV64-NEXT: ld a1, %lo(.LCPI45_3)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v16
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
+ %v = call <vscale x 8 x i64> @llvm.vp.ctpop.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl)
+ ret <vscale x 8 x i64> %v
+}
+
+declare <vscale x 16 x i64> @llvm.vp.ctpop.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i1>, i32)
+
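+; nxv16i64 spans two m8 register groups (v8 and v16), so the expansion is
+; done in two halves. The scalar prologue derives the EVL for the high
+; half as a saturating subtraction:
+;   sub a2, a0, a1    (evl - vlenb, may wrap)
+;   sltu a3, a0, a2   (1 exactly when it wrapped)
+;   addi a3, a3, -1   (0 when it wrapped, -1 otherwise)
+;   and a2, a3, a2    (max(evl - vlenb, 0))
+; and clamps the low half to min(evl, vlenb) with the bltu/mv pair. The
+; high half's mask bits are shifted down into v0 with vslidedown.vx by
+; vlenb/8 bytes (the srli by 3). The RV32 version also spills whole m8
+; groups, since two m8 halves plus the splatted 64-bit constants do not
+; fit in the vector register file at once.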
+define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 48
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
+; RV32-NEXT: vmv1r.v v1, v0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: srli a2, a1, 3
+; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: lui a2, 349525
+; RV32-NEXT: addi a2, a2, 1365
+; RV32-NEXT: sw a2, 12(sp)
+; RV32-NEXT: sw a2, 8(sp)
+; RV32-NEXT: lui a2, 209715
+; RV32-NEXT: addi a2, a2, 819
+; RV32-NEXT: sw a2, 12(sp)
+; RV32-NEXT: sw a2, 8(sp)
+; RV32-NEXT: lui a2, 61681
+; RV32-NEXT: addi a2, a2, -241
+; RV32-NEXT: sw a2, 12(sp)
+; RV32-NEXT: sw a2, 8(sp)
+; RV32-NEXT: lui a2, 4112
+; RV32-NEXT: addi a2, a2, 257
+; RV32-NEXT: sw a2, 12(sp)
+; RV32-NEXT: sw a2, 8(sp)
+; RV32-NEXT: sub a2, a0, a1
+; RV32-NEXT: sltu a3, a0, a2
+; RV32-NEXT: addi a3, a3, -1
+; RV32-NEXT: and a2, a3, a2
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t
+; RV32-NEXT: addi a3, sp, 8
+; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v24, v16, v0.t
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a5, 40
+; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v16, v16, v24, v0.t
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a5, 40
+; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a5, 40
+; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v24, v24, v16, v0.t
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a5, 40
+; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vi v24, v24, 2, v0.t
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a5, 40
+; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v16, v24, v16, v0.t
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v16, v24, v16, v0.t
+; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t
+; RV32-NEXT: vadd.vv v24, v16, v24, v0.t
+; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v24, v16, v0.t
+; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: addi a3, sp, 16
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vmul.vv v16, v24, v16, v0.t
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: vsrl.vx v8, v16, a2, v0.t
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 16
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: bltu a0, a1, .LBB46_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: mv a0, a1
+; RV32-NEXT: .LBB46_2:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v1
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v24, v16, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 48
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 4
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT: vmv1r.v v24, v0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: srli a2, a1, 3
+; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vx v0, v0, a2
+; RV64-NEXT: sub a2, a0, a1
+; RV64-NEXT: sltu a3, a0, a2
+; RV64-NEXT: addi a3, a3, -1
+; RV64-NEXT: and a2, a3, a2
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64-NEXT: lui a2, %hi(.LCPI46_0)
+; RV64-NEXT: ld a3, %lo(.LCPI46_0)(a2)
+; RV64-NEXT: lui a2, %hi(.LCPI46_1)
+; RV64-NEXT: ld a2, %lo(.LCPI46_1)(a2)
+; RV64-NEXT: vsrl.vi v8, v16, 1, v0.t
+; RV64-NEXT: vand.vx v8, v8, a3, v0.t
+; RV64-NEXT: vsub.vv v8, v16, v8, v0.t
+; RV64-NEXT: vand.vx v16, v8, a2, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a2, v0.t
+; RV64-NEXT: vadd.vv v16, v16, v8, v0.t
+; RV64-NEXT: lui a4, %hi(.LCPI46_2)
+; RV64-NEXT: ld a4, %lo(.LCPI46_2)(a4)
+; RV64-NEXT: lui a5, %hi(.LCPI46_3)
+; RV64-NEXT: ld a5, %lo(.LCPI46_3)(a5)
+; RV64-NEXT: vsrl.vi v8, v16, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV64-NEXT: vand.vx v8, v8, a4, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a5, v0.t
+; RV64-NEXT: li a6, 56
+; RV64-NEXT: vsrl.vx v8, v8, a6, v0.t
+; RV64-NEXT: addi a7, sp, 16
+; RV64-NEXT: vs8r.v v8, (a7) # Unknown-size Folded Spill
+; RV64-NEXT: bltu a0, a1, .LBB46_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: mv a0, a1
+; RV64-NEXT: .LBB46_2:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsrl.vi v8, v16, 1, v0.t
+; RV64-NEXT: vand.vx v8, v8, a3, v0.t
+; RV64-NEXT: vsub.vv v8, v16, v8, v0.t
+; RV64-NEXT: vand.vx v16, v8, a2, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a2, v0.t
+; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v8, v8, a4, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a5, v0.t
+; RV64-NEXT: vsrl.vx v8, v8, a6, v0.t
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %v = call <vscale x 16 x i64> @llvm.vp.ctpop.nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i64> %v
+}
+
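+; In the unmasked split the RV64 code needs no stack at all: the four
+; constants stay in scalar registers across both halves. RV32 still
+; spills several of the splatted constant vectors so they can be reused
+; for the second half.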
+define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_nxv16i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: sub a2, a0, a1
+; RV32-NEXT: sltu a3, a0, a2
+; RV32-NEXT: addi a3, a3, -1
+; RV32-NEXT: and a2, a3, a2
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v24, v16, 1
+; RV32-NEXT: addi a3, sp, 8
+; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v0, (a3), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v0, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v24, v0
+; RV32-NEXT: vsub.vv v16, v16, v24
+; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v0, (a3), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v16, v0
+; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: vand.vv v16, v16, v0
+; RV32-NEXT: vadd.vv v16, v24, v16
+; RV32-NEXT: vsrl.vi v24, v16, 4
+; RV32-NEXT: vadd.vv v24, v16, v24
+; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 4
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v16, v24, v16
+; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a3), zero
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vmul.vv v16, v16, v24
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: vsrl.vx v16, v16, a2
+; RV32-NEXT: addi a3, sp, 16
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: bltu a0, a1, .LBB47_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: mv a0, a1
+; RV32-NEXT: .LBB47_2:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsub.vv v8, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v0
+; RV32-NEXT: vadd.vv v8, v16, v8
+; RV32-NEXT: vsrl.vi v16, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vx v8, v8, a2
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_nxv16i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: sub a2, a0, a1
+; RV64-NEXT: sltu a3, a0, a2
+; RV64-NEXT: addi a3, a3, -1
+; RV64-NEXT: and a2, a3, a2
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64-NEXT: lui a2, %hi(.LCPI47_0)
+; RV64-NEXT: ld a2, %lo(.LCPI47_0)(a2)
+; RV64-NEXT: lui a3, %hi(.LCPI47_1)
+; RV64-NEXT: ld a3, %lo(.LCPI47_1)(a3)
+; RV64-NEXT: vsrl.vi v24, v16, 1
+; RV64-NEXT: vand.vx v24, v24, a2
+; RV64-NEXT: vsub.vv v16, v16, v24
+; RV64-NEXT: vand.vx v24, v16, a3
+; RV64-NEXT: vsrl.vi v16, v16, 2
+; RV64-NEXT: vand.vx v16, v16, a3
+; RV64-NEXT: vadd.vv v16, v24, v16
+; RV64-NEXT: lui a4, %hi(.LCPI47_2)
+; RV64-NEXT: ld a4, %lo(.LCPI47_2)(a4)
+; RV64-NEXT: lui a5, %hi(.LCPI47_3)
+; RV64-NEXT: ld a5, %lo(.LCPI47_3)(a5)
+; RV64-NEXT: vsrl.vi v24, v16, 4
+; RV64-NEXT: vadd.vv v16, v16, v24
+; RV64-NEXT: vand.vx v16, v16, a4
+; RV64-NEXT: vmul.vx v16, v16, a5
+; RV64-NEXT: li a6, 56
+; RV64-NEXT: vsrl.vx v16, v16, a6
+; RV64-NEXT: bltu a0, a1, .LBB47_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: mv a0, a1
+; RV64-NEXT: .LBB47_2:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vsrl.vi v24, v8, 1
+; RV64-NEXT: vand.vx v24, v24, a2
+; RV64-NEXT: vsub.vv v8, v8, v24
+; RV64-NEXT: vand.vx v24, v8, a3
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a3
+; RV64-NEXT: vadd.vv v8, v24, v8
+; RV64-NEXT: vsrl.vi v24, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v24
+; RV64-NEXT: vand.vx v8, v8, a4
+; RV64-NEXT: vmul.vx v8, v8, a5
+; RV64-NEXT: vsrl.vx v8, v8, a6
+; RV64-NEXT: ret
+ %head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
+ %v = call <vscale x 16 x i64> @llvm.vp.ctpop.nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 %evl)
+ ret <vscale x 16 x i64> %v
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d -riscv-v-vector-bits-min=128 \
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \
+; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
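+; This file repeats the vp.ctpop tests for fixed-length vectors. With
+; -riscv-v-vector-bits-min=128 any type of 128 bits or fewer fits in one
+; vector register at LMUL <= 1, so <2 x i8> selects the mf8 fractional
+; group, <16 x i8> a full m1 register, and so on. The _unmasked variants
+; pass an all-true mask (an i1 true splatted via insertelement +
+; shufflevector) and therefore select the unmasked instruction forms,
+; without the v0.t suffix.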
+declare <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8>, <2 x i1>, i32)
+
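+; For e8 every mask fits in an immediate (85 = 0x55 and 51 = 0x33 via li,
+; 15 = 0xf directly in vand.vi) and the multiply/shift step is dropped:
+; an i8 popcount is at most 8, so the low nibble already holds the result
+; after the x + (x >> 4) step.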
+define <2 x i8> @vp_ctpop_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_v2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+ %v = call <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+define <2 x i8> @vp_ctpop_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_v2i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i8> %v
+}
+
+declare <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8>, <4 x i1>, i32)
+
+define <4 x i8> @vp_ctpop_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_v4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+ %v = call <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+define <4 x i8> @vp_ctpop_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_v4i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i8> %v
+}
+
+declare <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8>, <8 x i1>, i32)
+
+define <8 x i8> @vp_ctpop_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_v8i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+ %v = call <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+define <8 x i8> @vp_ctpop_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_v8i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i8> %v
+}
+
+declare <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8>, <16 x i1>, i32)
+
+define <16 x i8> @vp_ctpop_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_v16i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0, v0.t
+; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0, v0.t
+; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t
+; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
+; CHECK-NEXT: ret
+ %v = call <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+define <16 x i8> @vp_ctpop_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vp_ctpop_v16i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT: vsrl.vi v9, v8, 1
+; CHECK-NEXT: li a0, 85
+; CHECK-NEXT: vand.vx v9, v9, a0
+; CHECK-NEXT: vsub.vv v8, v8, v9
+; CHECK-NEXT: li a0, 51
+; CHECK-NEXT: vand.vx v9, v8, a0
+; CHECK-NEXT: vsrl.vi v8, v8, 2
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: vadd.vv v8, v9, v8
+; CHECK-NEXT: vsrl.vi v9, v8, 4
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: vand.vi v8, v8, 15
+; CHECK-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i8> %v
+}
+
+declare <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16>, <2 x i1>, i32)
+
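+; The e16 masks are again lui/addi material: lui 5 + addi 1365 = 0x5555,
+; lui 3 + addi 819 = 0x3333, lui 1 + addi -241 = 0x0f0f. The accumulating
+; multiplier is li 257 (0x0101) and the final shift is 8, moving the byte
+; sum out of bits 15:8. As with e32, RV32 and RV64 differ only in addi
+; versus addiw.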
+define <2 x i16> @vp_ctpop_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v2i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v2i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vp_ctpop_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v2i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v2i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+declare <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16>, <4 x i1>, i32)
+
+define <4 x i16> @vp_ctpop_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v4i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v4i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vp_ctpop_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v4i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v4i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+declare <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16>, <8 x i1>, i32)
+
+define <8 x i16> @vp_ctpop_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v8i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v8i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vp_ctpop_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v8i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v8i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+declare <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16>, <16 x i1>, i32)
+
+define <16 x i16> @vp_ctpop_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v16i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v10, v10, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v10, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v16i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v10, v10, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v10, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vp_ctpop_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v16i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v10, v10, a0
+; RV32-NEXT: vsub.vv v8, v8, v10
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v10, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v10, v8
+; RV32-NEXT: vsrl.vi v10, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v16i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; RV64-NEXT: vsrl.vi v10, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v10, v10, a0
+; RV64-NEXT: vsub.vv v8, v8, v10
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v10, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v10, v8
+; RV64-NEXT: vsrl.vi v10, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
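+; The i32 element tests use the same SWAR sequence with 32-bit constants
+; (0x55555555, 0x33333333, 0x0f0f0f0f), each built with lui plus addi (addiw
+; on RV64, the canonical 32-bit constant materialization there), and sum the
+; four byte counts with a multiply by 0x01010101 followed by a shift right
+; by 24.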
+declare <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32>, <2 x i1>, i32)
+
+define <2 x i32> @vp_ctpop_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v2i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v2i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vp_ctpop_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v2i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v2i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+declare <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32>, <4 x i1>, i32)
+
+define <4 x i32> @vp_ctpop_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v4i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v4i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vp_ctpop_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v4i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v4i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+declare <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32>, <8 x i1>, i32)
+
+define <8 x i32> @vp_ctpop_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v8i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v10, v10, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v10, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v8i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v10, v10, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v10, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vp_ctpop_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v8i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v10, v10, a0
+; RV32-NEXT: vsub.vv v8, v8, v10
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v10, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v10, v8
+; RV32-NEXT: vsrl.vi v10, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v8i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; RV64-NEXT: vsrl.vi v10, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v10, v10, a0
+; RV64-NEXT: vsub.vv v8, v8, v10
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v10, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v10, v8
+; RV64-NEXT: vsrl.vi v10, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+declare <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32>, <16 x i1>, i32)
+
+define <16 x i32> @vp_ctpop_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v16i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v12, v12, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v12, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v16i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v12, v12, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v12, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vp_ctpop_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v16i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v12, v12, a0
+; RV32-NEXT: vsub.vv v8, v8, v12
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v12, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v12, v8
+; RV32-NEXT: vsrl.vi v12, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v12
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v16i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; RV64-NEXT: vsrl.vi v12, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v12, v12, a0
+; RV64-NEXT: vsub.vv v8, v8, v12
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v12, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v12, v8
+; RV64-NEXT: vsrl.vi v12, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
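+; For the i64 element tests the masks no longer fit in simm32. RV32 has no
+; 64-bit scalar registers, so each constant is splatted as its repeating
+; 32-bit pattern via vmv.v.x under a temporary e32 vsetvli/vsetivli (at twice
+; the element count), and the vector-vector (.vv) instruction forms are used.
+; RV64 instead loads the full 64-bit constants from the constant pool (the
+; .LCPI* labels) and keeps the vector-scalar (.vx) forms. The final byte sum
+; is shifted right by 56, which needs vsrl.vx with the amount in a scalar
+; register since 56 exceeds the 5-bit immediate range of vsrl.vi.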
+declare <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64>, <2 x i1>, i32)
+
+define <2 x i64> @vp_ctpop_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v9, v9, v10, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v10, v8, v9, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v9, v0.t
+; RV32-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v9, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI24_0)
+; RV64-NEXT: ld a0, %lo(.LCPI24_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI24_1)
+; RV64-NEXT: ld a1, %lo(.LCPI24_1)(a1)
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: vand.vx v9, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI24_2)
+; RV64-NEXT: ld a0, %lo(.LCPI24_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI24_3)
+; RV64-NEXT: ld a1, %lo(.LCPI24_3)(a1)
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vp_ctpop_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v9, v9, v10
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v10, v8, v9
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v9
+; RV32-NEXT: vadd.vv v8, v10, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v9
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v9
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI25_0)
+; RV64-NEXT: ld a0, %lo(.LCPI25_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI25_1)
+; RV64-NEXT: ld a1, %lo(.LCPI25_1)(a1)
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: vand.vx v9, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: lui a0, %hi(.LCPI25_2)
+; RV64-NEXT: ld a0, %lo(.LCPI25_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI25_3)
+; RV64-NEXT: ld a1, %lo(.LCPI25_3)(a1)
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+declare <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64>, <4 x i1>, i32)
+
+define <4 x i64> @vp_ctpop_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v10, v10, v12, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
+; RV32-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v10, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI26_0)
+; RV64-NEXT: ld a0, %lo(.LCPI26_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI26_1)
+; RV64-NEXT: ld a1, %lo(.LCPI26_1)(a1)
+; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT: vand.vx v10, v10, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV64-NEXT: vand.vx v10, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI26_2)
+; RV64-NEXT: ld a0, %lo(.LCPI26_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI26_3)
+; RV64-NEXT: ld a1, %lo(.LCPI26_3)(a1)
+; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vp_ctpop_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v10, v10, v12
+; RV32-NEXT: vsub.vv v8, v8, v10
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v12, v8, v10
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v10
+; RV32-NEXT: vadd.vv v8, v12, v8
+; RV32-NEXT: vsrl.vi v10, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v10
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v10
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI27_0)
+; RV64-NEXT: ld a0, %lo(.LCPI27_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI27_1)
+; RV64-NEXT: ld a1, %lo(.LCPI27_1)(a1)
+; RV64-NEXT: vsrl.vi v10, v8, 1
+; RV64-NEXT: vand.vx v10, v10, a0
+; RV64-NEXT: vsub.vv v8, v8, v10
+; RV64-NEXT: vand.vx v10, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v10, v8
+; RV64-NEXT: lui a0, %hi(.LCPI27_2)
+; RV64-NEXT: ld a0, %lo(.LCPI27_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI27_3)
+; RV64-NEXT: ld a1, %lo(.LCPI27_3)(a1)
+; RV64-NEXT: vsrl.vi v10, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+declare <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64>, <8 x i1>, i32)
+
+define <8 x i64> @vp_ctpop_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v12, v12, v16, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v16, v8, v12, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v12, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI28_0)
+; RV64-NEXT: ld a0, %lo(.LCPI28_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI28_1)
+; RV64-NEXT: ld a1, %lo(.LCPI28_1)(a1)
+; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV64-NEXT: vand.vx v12, v12, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV64-NEXT: vand.vx v12, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI28_2)
+; RV64-NEXT: ld a0, %lo(.LCPI28_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI28_3)
+; RV64-NEXT: ld a1, %lo(.LCPI28_3)(a1)
+; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vp_ctpop_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v12, v12, v16
+; RV32-NEXT: vsub.vv v8, v8, v12
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v16, v8, v12
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v12
+; RV32-NEXT: vadd.vv v8, v16, v8
+; RV32-NEXT: vsrl.vi v12, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v12
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v12
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v12
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI29_0)
+; RV64-NEXT: ld a0, %lo(.LCPI29_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI29_1)
+; RV64-NEXT: ld a1, %lo(.LCPI29_1)(a1)
+; RV64-NEXT: vsrl.vi v12, v8, 1
+; RV64-NEXT: vand.vx v12, v12, a0
+; RV64-NEXT: vsub.vv v8, v8, v12
+; RV64-NEXT: vand.vx v12, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v12, v8
+; RV64-NEXT: lui a0, %hi(.LCPI29_2)
+; RV64-NEXT: ld a0, %lo(.LCPI29_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI29_3)
+; RV64-NEXT: ld a1, %lo(.LCPI29_3)(a1)
+; RV64-NEXT: vsrl.vi v12, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
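+; <15 x i64> is not a power-of-two type; it is presumably widened to
+; <16 x i64> during legalization, so the generated code matches the v16i64
+; tests below apart from the constant-pool labels.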
+declare <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64>, <15 x i1>, i32)
+
+define <15 x i64> @vp_ctpop_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v15i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v24, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v15i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI30_0)
+; RV64-NEXT: ld a0, %lo(.LCPI30_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI30_1)
+; RV64-NEXT: ld a1, %lo(.LCPI30_1)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT: vand.vx v16, v16, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v16, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI30_2)
+; RV64-NEXT: ld a0, %lo(.LCPI30_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI30_3)
+; RV64-NEXT: ld a1, %lo(.LCPI30_3)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
+ ret <15 x i64> %v
+}
+
+define <15 x i64> @vp_ctpop_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v15i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v24, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsub.vv v8, v8, v16
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vadd.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v16, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v15i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI31_0)
+; RV64-NEXT: ld a0, %lo(.LCPI31_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI31_1)
+; RV64-NEXT: ld a1, %lo(.LCPI31_1)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 1
+; RV64-NEXT: vand.vx v16, v16, a0
+; RV64-NEXT: vsub.vv v8, v8, v16
+; RV64-NEXT: vand.vx v16, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v16, v8
+; RV64-NEXT: lui a0, %hi(.LCPI31_2)
+; RV64-NEXT: ld a0, %lo(.LCPI31_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI31_3)
+; RV64-NEXT: ld a1, %lo(.LCPI31_3)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v16
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <15 x i1> poison, i1 true, i32 0
+ %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
+ %v = call <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
+ ret <15 x i64> %v
+}
+
+declare <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64>, <16 x i1>, i32)
+
+define <16 x i64> @vp_ctpop_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v24, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI32_0)
+; RV64-NEXT: ld a0, %lo(.LCPI32_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI32_1)
+; RV64-NEXT: ld a1, %lo(.LCPI32_1)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT: vand.vx v16, v16, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v16, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI32_2)
+; RV64-NEXT: ld a0, %lo(.LCPI32_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI32_3)
+; RV64-NEXT: ld a1, %lo(.LCPI32_3)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vp_ctpop_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v16i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v24, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsub.vv v8, v8, v16
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vadd.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v16, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v16i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI33_0)
+; RV64-NEXT: ld a0, %lo(.LCPI33_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI33_1)
+; RV64-NEXT: ld a1, %lo(.LCPI33_1)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 1
+; RV64-NEXT: vand.vx v16, v16, a0
+; RV64-NEXT: vsub.vv v8, v8, v16
+; RV64-NEXT: vand.vx v16, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v16, v8
+; RV64-NEXT: lui a0, %hi(.LCPI33_2)
+; RV64-NEXT: ld a0, %lo(.LCPI33_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI33_3)
+; RV64-NEXT: ld a1, %lo(.LCPI33_3)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v16
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
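+; <32 x i64> exceeds the m8 register group at e64, so the operation is split
+; into two halves of 16 elements. The mask bits for the second half are
+; extracted with vslidedown.vi, the first half runs with the EVL clamped to
+; 16 (the bltu/li sequence), and the second half uses max(evl - 16, 0)
+; computed branchlessly with the sltu/addi/and sequence. Register pressure
+; from both m8 operand halves forces stack spills (the vs8r.v/vl8r.v pairs);
+; on RV32 the splatted 64-bit constants push this to 56 * vlenb of spill
+; slots, versus 16 * vlenb on RV64.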
+declare <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64>, <32 x i1>, i32)
+
+define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v32i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 48
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: vslidedown.vi v24, v0, 2
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB34_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB34_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: lui a2, 349525
+; RV32-NEXT: addi a2, a2, 1365
+; RV32-NEXT: li a3, 32
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a5, 40
+; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vmv.v.x v8, a2
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a2, a2, a4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a4, 40
+; RV32-NEXT: mul a2, a2, a4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: lui a2, 209715
+; RV32-NEXT: addi a2, a2, 819
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v8, a2
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a4, 40
+; RV32-NEXT: mul a2, a2, a4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v16, 4, v0.t
+; RV32-NEXT: vadd.vv v16, v16, v8, v0.t
+; RV32-NEXT: lui a2, 61681
+; RV32-NEXT: addi a2, a2, -241
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v8, a2
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v16, v8, v0.t
+; RV32-NEXT: lui a2, 4112
+; RV32-NEXT: addi a2, a2, 257
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a2
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: li a1, 56
+; RV32-NEXT: vsrl.vx v8, v8, a1, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: addi a2, a0, -16
+; RV32-NEXT: sltu a0, a0, a2
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v24
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 48
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 24
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v16, v8, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 48
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 48
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 48
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 56
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v32i64:
+; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 4
+; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB34_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB34_2:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: lui a1, %hi(.LCPI34_0)
+; RV64-NEXT: ld a1, %lo(.LCPI34_0)(a1)
+; RV64-NEXT: lui a2, %hi(.LCPI34_1)
+; RV64-NEXT: ld a2, %lo(.LCPI34_1)(a2)
+; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT: vand.vx v16, v16, a1, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v16, v8, a2, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a2, v0.t
+; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV64-NEXT: lui a3, %hi(.LCPI34_2)
+; RV64-NEXT: ld a3, %lo(.LCPI34_2)(a3)
+; RV64-NEXT: lui a4, %hi(.LCPI34_3)
+; RV64-NEXT: ld a4, %lo(.LCPI34_3)(a4)
+; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v8, v8, a3, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a4, v0.t
+; RV64-NEXT: li a5, 56
+; RV64-NEXT: vsrl.vx v8, v8, a5, v0.t
+; RV64-NEXT: addi a6, sp, 16
+; RV64-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
+; RV64-NEXT: addi a6, a0, -16
+; RV64-NEXT: sltu a0, a0, a6
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: and a0, a0, a6
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT: vand.vx v16, v16, a1, v0.t
+; RV64-NEXT: vsub.vv v16, v8, v16, v0.t
+; RV64-NEXT: vand.vx v8, v16, a2, v0.t
+; RV64-NEXT: vsrl.vi v16, v16, 2, v0.t
+; RV64-NEXT: vand.vx v16, v16, a2, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v8, v8, a3, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a4, v0.t
+; RV64-NEXT: vsrl.vx v16, v8, a5, v0.t
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
+; RV64-NEXT: ret
+ %v = call <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
+ ret <32 x i64> %v
+}
+
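+; The masked v32i64 lowering above implements the classic SWAR popcount per
+; 64-bit element:
+;   x -= (x >> 1) & 0x5555555555555555
+;   x  = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
+;   x  = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
+;   popcount = (x * 0x0101010101010101) >> 56
+; RV64 loads the four constants from the constant pool (.LCPI34_0-.LCPI34_3,
+; presumably the masks above); RV32 cannot hold a 64-bit mask in one GPR, so
+; it splats the two identical 32-bit halves with vmv.v.x at SEW=32 (e.g.
+; lui 349525 + addi 1365 = 0x55555555) and spills the splats so they survive
+; across both halves of the vector.
+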
+define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v32i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB35_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB35_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: lui a2, 349525
+; RV32-NEXT: addi a2, a2, 1365
+; RV32-NEXT: li a3, 32
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v24, a2
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a2, a2, a4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsub.vv v8, v8, v16
+; RV32-NEXT: lui a2, 209715
+; RV32-NEXT: addi a2, a2, 819
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v0, a2
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v8, v0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v0
+; RV32-NEXT: vadd.vv v8, v16, v8
+; RV32-NEXT: vsrl.vi v16, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: lui a2, 61681
+; RV32-NEXT: addi a2, a2, -241
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a2
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: lui a2, 4112
+; RV32-NEXT: addi a2, a2, 257
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v8, a2
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v24, v24, v8
+; RV32-NEXT: li a1, 56
+; RV32-NEXT: vsrl.vx v8, v24, a1
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: addi a2, a0, -16
+; RV32-NEXT: sltu a0, a0, a2
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vi v24, v8, 1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 24
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vsub.vv v24, v8, v24
+; RV32-NEXT: vand.vv v8, v24, v0
+; RV32-NEXT: vsrl.vi v24, v24, 2
+; RV32-NEXT: vand.vv v24, v24, v0
+; RV32-NEXT: vadd.vv v8, v8, v24
+; RV32-NEXT: vsrl.vi v24, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v24
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v32i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: li a2, 16
+; RV64-NEXT: mv a1, a0
+; RV64-NEXT: bltu a0, a2, .LBB35_2
+; RV64-NEXT: # %bb.1:
+; RV64-NEXT: li a1, 16
+; RV64-NEXT: .LBB35_2:
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: lui a1, %hi(.LCPI35_0)
+; RV64-NEXT: ld a1, %lo(.LCPI35_0)(a1)
+; RV64-NEXT: lui a2, %hi(.LCPI35_1)
+; RV64-NEXT: ld a2, %lo(.LCPI35_1)(a2)
+; RV64-NEXT: vsrl.vi v24, v8, 1
+; RV64-NEXT: vand.vx v24, v24, a1
+; RV64-NEXT: vsub.vv v8, v8, v24
+; RV64-NEXT: vand.vx v24, v8, a2
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a2
+; RV64-NEXT: vadd.vv v8, v24, v8
+; RV64-NEXT: lui a3, %hi(.LCPI35_2)
+; RV64-NEXT: ld a3, %lo(.LCPI35_2)(a3)
+; RV64-NEXT: lui a4, %hi(.LCPI35_3)
+; RV64-NEXT: ld a4, %lo(.LCPI35_3)(a4)
+; RV64-NEXT: vsrl.vi v24, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v24
+; RV64-NEXT: vand.vx v8, v8, a3
+; RV64-NEXT: vmul.vx v8, v8, a4
+; RV64-NEXT: li a5, 56
+; RV64-NEXT: vsrl.vx v8, v8, a5
+; RV64-NEXT: addi a6, a0, -16
+; RV64-NEXT: sltu a0, a0, a6
+; RV64-NEXT: addi a0, a0, -1
+; RV64-NEXT: and a0, a0, a6
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: vsrl.vi v24, v16, 1
+; RV64-NEXT: vand.vx v24, v24, a1
+; RV64-NEXT: vsub.vv v16, v16, v24
+; RV64-NEXT: vand.vx v24, v16, a2
+; RV64-NEXT: vsrl.vi v16, v16, 2
+; RV64-NEXT: vand.vx v16, v16, a2
+; RV64-NEXT: vadd.vv v16, v24, v16
+; RV64-NEXT: vsrl.vi v24, v16, 4
+; RV64-NEXT: vadd.vv v16, v16, v24
+; RV64-NEXT: vand.vx v16, v16, a3
+; RV64-NEXT: vmul.vx v16, v16, a4
+; RV64-NEXT: vsrl.vx v16, v16, a5
+; RV64-NEXT: ret
+ %head = insertelement <32 x i1> poison, i1 true, i32 0
+ %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+ %v = call <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
+ ret <32 x i64> %v
+}
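+
+; Both v32i64 variants split the operation into two LMUL=8 halves of up to 16
+; elements each. The first half clamps the AVL to min(evl, 16) with the
+; bltu/li sequence; the second half computes max(evl - 16, 0) branchlessly:
+;   a2 = evl - 16              ; may wrap
+;   a0 = (evl <u a2) ? 1 : 0   ; sltu: 1 iff the subtraction borrowed
+;   a0 = a0 - 1                ; 0 if evl < 16, all-ones otherwise
+;   a0 = a0 & a2               ; 0, or evl - 16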