Rename the vpopc/vmandnot/vmornot assembler mnemonics to vcpop/vmandn/vmorn, matching the names in the RVV specification (e.g. "16.2. Vector count population in mask vcpop.m"); the old spellings are kept as assembler aliases.
Reviewed By: frasercrmck, jrtc27, craig.topper
Differential Revision: https://reviews.llvm.org/D111062
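
As a quick illustration of the user-facing rename (a minimal sketch using the overloaded C intrinsics exercised by the updated tests below; the helper name is hypothetical):

#include <riscv_vector.h>

// Renamed intrinsics: vcpop (was vpopc), vmandn (was vmandnot),
// vmorn (was vmornot). Computes op1 & ~op2, then counts the set mask bits.
unsigned long count_andn(vbool8_t op1, vbool8_t op2, size_t vl) {
  vbool8_t t = vmandn(op1, op2, vl); // was: vmandnot(op1, op2, vl)
  return vcpop(t, vl);               // was: vpopc(t, vl)
}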
// 16.1. Vector Mask-Register Logical Instructions
def vmand : RVVMaskBinBuiltin;
def vmnand : RVVMaskBinBuiltin;
-def vmandnot : RVVMaskBinBuiltin;
+def vmandn : RVVMaskBinBuiltin;
def vmxor : RVVMaskBinBuiltin;
def vmor : RVVMaskBinBuiltin;
def vmnor : RVVMaskBinBuiltin;
-def vmornot : RVVMaskBinBuiltin;
+def vmorn : RVVMaskBinBuiltin;
def vmxnor : RVVMaskBinBuiltin;
// pseudoinstructions
def vmclr : RVVMaskNullaryBuiltin;
defm vmnot_m : RVVPseudoMaskBuiltin<"vmnand", "c">;
let HasPolicy = false in {
-// 16.2. Vector mask population count vpopc
-def vpopc : RVVMaskOp0Builtin<"um">;
+// 16.2. Vector count population in mask vcpop.m
+def vcpop : RVVMaskOp0Builtin<"um">;
// 16.3. vfirst find-first-set mask bit
def vfirst : RVVMaskOp0Builtin<"lm">;
--- /dev/null
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b1(vbool1_t op1, size_t vl) {
+ return vcpop(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b2(vbool2_t op1, size_t vl) {
+ return vcpop(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b4(vbool4_t op1, size_t vl) {
+ return vcpop(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b8(vbool8_t op1, size_t vl) {
+ return vcpop(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b16(vbool16_t op1, size_t vl) {
+ return vcpop(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b32(vbool32_t op1, size_t vl) {
+ return vcpop(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b64(vbool64_t op1, size_t vl) {
+ return vcpop(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
+ return vcpop(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
+ return vcpop(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
+ return vcpop(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
+ return vcpop(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
+ return vcpop(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
+ return vcpop(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
+ return vcpop(mask, op1, vl);
+}
return vmand(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b1(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
-vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
- return vmandnot(op1, op2, vl);
+vbool1_t test_vmandn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmandn(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b2(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
-vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
- return vmandnot(op1, op2, vl);
+vbool2_t test_vmandn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmandn(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b4(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
-vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
- return vmandnot(op1, op2, vl);
+vbool4_t test_vmandn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmandn(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b8(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
-vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
- return vmandnot(op1, op2, vl);
+vbool8_t test_vmandn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmandn(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b16(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
-vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
- return vmandnot(op1, op2, vl);
+vbool16_t test_vmandn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmandn(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b32(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
-vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
- return vmandnot(op1, op2, vl);
+vbool32_t test_vmandn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmandn(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b64(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
-vbool64_t test_vmandnot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
- return vmandnot(op1, op2, vl);
+vbool64_t test_vmandn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmandn(op1, op2, vl);
}
return vmor(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b1(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
-vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
- return vmornot(op1, op2, vl);
+vbool1_t test_vmorn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmorn(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b2(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
-vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
- return vmornot(op1, op2, vl);
+vbool2_t test_vmorn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmorn(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b4(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
-vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
- return vmornot(op1, op2, vl);
+vbool4_t test_vmorn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmorn(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b8(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
-vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
- return vmornot(op1, op2, vl);
+vbool8_t test_vmorn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmorn(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b16(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
-vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
- return vmornot(op1, op2, vl);
+vbool16_t test_vmorn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmorn(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b32(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
-vbool32_t test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
- return vmornot(op1, op2, vl);
+vbool32_t test_vmorn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmorn(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b64(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
-vbool64_t test_vmornot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
- return vmornot(op1, op2, vl);
+vbool64_t test_vmorn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmorn(op1, op2, vl);
}
+++ /dev/null
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) {
- return vpopc(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) {
- return vpopc(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) {
- return vpopc(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) {
- return vpopc(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b16(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) {
- return vpopc(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b32(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) {
- return vpopc(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b64(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) {
- return vpopc(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
- return vpopc(mask, op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
- return vpopc(mask, op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
- return vpopc(mask, op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
- return vpopc(mask, op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b16_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
- return vpopc(mask, op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b32_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
- return vpopc(mask, op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b64_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
- return vpopc(mask, op1, vl);
-}
--- /dev/null
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b1(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b1(vbool1_t op1, size_t vl) {
+ return vcpop_m_b1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b2(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b2(vbool2_t op1, size_t vl) {
+ return vcpop_m_b2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b4(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b4(vbool4_t op1, size_t vl) {
+ return vcpop_m_b4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b8(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b8(vbool8_t op1, size_t vl) {
+ return vcpop_m_b8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b16(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b16(vbool16_t op1, size_t vl) {
+ return vcpop_m_b16(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b32(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b32(vbool32_t op1, size_t vl) {
+ return vcpop_m_b32(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b64(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b64(vbool64_t op1, size_t vl) {
+ return vcpop_m_b64(op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b1_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
+ return vcpop_m_b1_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b2_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
+ return vcpop_m_b2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b4_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
+ return vcpop_m_b4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b8_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
+ return vcpop_m_b8_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b16_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
+ return vcpop_m_b16_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b32_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
+ return vcpop_m_b32_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vcpop_m_b64_m(
+// CHECK-RV64-NEXT: entry:
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vcpop.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: ret i64 [[TMP0]]
+//
+unsigned long test_vcpop_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
+ return vcpop_m_b64_m(mask, op1, vl);
+}
return vmand_mm_b64(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b1(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
-vbool1_t test_vmandnot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
- return vmandnot_mm_b1(op1, op2, vl);
+vbool1_t test_vmandn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmandn_mm_b1(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b2(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
-vbool2_t test_vmandnot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
- return vmandnot_mm_b2(op1, op2, vl);
+vbool2_t test_vmandn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmandn_mm_b2(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b4(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
-vbool4_t test_vmandnot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
- return vmandnot_mm_b4(op1, op2, vl);
+vbool4_t test_vmandn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmandn_mm_b4(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b8(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
-vbool8_t test_vmandnot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
- return vmandnot_mm_b8(op1, op2, vl);
+vbool8_t test_vmandn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmandn_mm_b8(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b16(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
-vbool16_t test_vmandnot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
- return vmandnot_mm_b16(op1, op2, vl);
+vbool16_t test_vmandn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmandn_mm_b16(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b32(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
-vbool32_t test_vmandnot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
- return vmandnot_mm_b32(op1, op2, vl);
+vbool32_t test_vmandn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmandn_mm_b32(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmandnot_mm_b64(
+// CHECK-RV64-LABEL: @test_vmandn_mm_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
-vbool64_t test_vmandnot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
- return vmandnot_mm_b64(op1, op2, vl);
+vbool64_t test_vmandn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmandn_mm_b64(op1, op2, vl);
}
return vmor_mm_b64(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b1(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
-vbool1_t test_vmornot_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
- return vmornot_mm_b1(op1, op2, vl);
+vbool1_t test_vmorn_mm_b1(vbool1_t op1, vbool1_t op2, size_t vl) {
+ return vmorn_mm_b1(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b2(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
-vbool2_t test_vmornot_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
- return vmornot_mm_b2(op1, op2, vl);
+vbool2_t test_vmorn_mm_b2(vbool2_t op1, vbool2_t op2, size_t vl) {
+ return vmorn_mm_b2(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b4(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
-vbool4_t test_vmornot_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
- return vmornot_mm_b4(op1, op2, vl);
+vbool4_t test_vmorn_mm_b4(vbool4_t op1, vbool4_t op2, size_t vl) {
+ return vmorn_mm_b4(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b8(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
-vbool8_t test_vmornot_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
- return vmornot_mm_b8(op1, op2, vl);
+vbool8_t test_vmorn_mm_b8(vbool8_t op1, vbool8_t op2, size_t vl) {
+ return vmorn_mm_b8(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b16(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b16(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
-vbool16_t test_vmornot_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
- return vmornot_mm_b16(op1, op2, vl);
+vbool16_t test_vmorn_mm_b16(vbool16_t op1, vbool16_t op2, size_t vl) {
+ return vmorn_mm_b16(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b32(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b32(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
-vbool32_t test_vmornot_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
- return vmornot_mm_b32(op1, op2, vl);
+vbool32_t test_vmorn_mm_b32(vbool32_t op1, vbool32_t op2, size_t vl) {
+ return vmorn_mm_b32(op1, op2, vl);
}
-// CHECK-RV64-LABEL: @test_vmornot_mm_b64(
+// CHECK-RV64-LABEL: @test_vmorn_mm_b64(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
-vbool64_t test_vmornot_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
- return vmornot_mm_b64(op1, op2, vl);
+vbool64_t test_vmorn_mm_b64(vbool64_t op1, vbool64_t op2, size_t vl) {
+ return vmorn_mm_b64(op1, op2, vl);
}
+++ /dev/null
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
-
-#include <riscv_vector.h>
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b1(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b1(vbool1_t op1, size_t vl) {
- return vpopc_m_b1(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b2(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b2(vbool2_t op1, size_t vl) {
- return vpopc_m_b2(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b4(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b4(vbool4_t op1, size_t vl) {
- return vpopc_m_b4(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b8(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b8(vbool8_t op1, size_t vl) {
- return vpopc_m_b8(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b16(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b16(vbool16_t op1, size_t vl) {
- return vpopc_m_b16(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b32(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b32(vbool32_t op1, size_t vl) {
- return vpopc_m_b32(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b64(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b64(vbool64_t op1, size_t vl) {
- return vpopc_m_b64(op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b1_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv64i1.i64(<vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) {
- return vpopc_m_b1_m(mask, op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b2_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv32i1.i64(<vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) {
- return vpopc_m_b2_m(mask, op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b4_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv16i1.i64(<vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) {
- return vpopc_m_b4_m(mask, op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b8_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv8i1.i64(<vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
- return vpopc_m_b8_m(mask, op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b16_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv4i1.i64(<vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b16_m(vbool16_t mask, vbool16_t op1, size_t vl) {
- return vpopc_m_b16_m(mask, op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b32_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv2i1.i64(<vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b32_m(vbool32_t mask, vbool32_t op1, size_t vl) {
- return vpopc_m_b32_m(mask, op1, vl);
-}
-
-// CHECK-RV64-LABEL: @test_vpopc_m_b64_m(
-// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call i64 @llvm.riscv.vpopc.mask.nxv1i1.i64(<vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT: ret i64 [[TMP0]]
-//
-unsigned long test_vpopc_m_b64_m(vbool64_t mask, vbool64_t op1, size_t vl) {
- return vpopc_m_b64_m(mask, op1, vl);
-}
def int_riscv_vmand: RISCVBinaryAAANoMask;
def int_riscv_vmnand: RISCVBinaryAAANoMask;
- def int_riscv_vmandnot: RISCVBinaryAAANoMask;
+ def int_riscv_vmandn: RISCVBinaryAAANoMask;
def int_riscv_vmxor: RISCVBinaryAAANoMask;
def int_riscv_vmor: RISCVBinaryAAANoMask;
def int_riscv_vmnor: RISCVBinaryAAANoMask;
- def int_riscv_vmornot: RISCVBinaryAAANoMask;
+ def int_riscv_vmorn: RISCVBinaryAAANoMask;
def int_riscv_vmxnor: RISCVBinaryAAANoMask;
def int_riscv_vmclr : RISCVNullaryIntrinsic;
def int_riscv_vmset : RISCVNullaryIntrinsic;
- defm vpopc : RISCVMaskUnarySOut;
+ defm vcpop : RISCVMaskUnarySOut;
defm vfirst : RISCVMaskUnarySOut;
defm vmsbf : RISCVMaskUnaryMOut;
defm vmsof : RISCVMaskUnaryMOut;
// masked va >= x, vd == v0
//
// pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt
- // expansion: vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt
+ // expansion: vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
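// For example (illustrative register choices):
//   vmsgeu.vx v0, v4, a0, v0.t, v12
// expands to:
//   vmsltu.vx v12, v4, a0; vmandn.mm v0, v0, v12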
assert(Inst.getOperand(0).getReg() == RISCV::V0 &&
"The destination register should be V0.");
assert(Inst.getOperand(1).getReg() != RISCV::V0 &&
.addOperand(Inst.getOperand(2))
.addOperand(Inst.getOperand(3))
.addOperand(Inst.getOperand(4)));
- emitToStreamer(Out, MCInstBuilder(RISCV::VMANDNOT_MM)
+ emitToStreamer(Out, MCInstBuilder(RISCV::VMANDN_MM)
.addOperand(Inst.getOperand(0))
.addOperand(Inst.getOperand(0))
.addOperand(Inst.getOperand(1)));
// masked va >= x, any vd
//
// pseudoinstruction: vmsge{u}.vx vd, va, x, v0.t, vt
- // expansion: vmslt{u}.vx vt, va, x; vmandnot.mm vt, v0, vt; vmandnot.mm vd,
+ // expansion: vmslt{u}.vx vt, va, x; vmandn.mm vt, v0, vt; vmandn.mm vd,
// vd, v0; vmor.mm vd, vt, vd
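// For example (illustrative register choices):
//   vmsgeu.vx v8, v4, a0, v0.t, v12
// expands to:
//   vmsltu.vx v12, v4, a0; vmandn.mm v12, v0, v12;
//   vmandn.mm v8, v8, v0; vmor.mm v8, v12, v8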
assert(Inst.getOperand(1).getReg() != RISCV::V0 &&
"The temporary vector register should not be V0.");
.addOperand(Inst.getOperand(2))
.addOperand(Inst.getOperand(3))
.addReg(RISCV::NoRegister));
- emitToStreamer(Out, MCInstBuilder(RISCV::VMANDNOT_MM)
+ emitToStreamer(Out, MCInstBuilder(RISCV::VMANDN_MM)
.addOperand(Inst.getOperand(1))
.addReg(RISCV::V0)
.addOperand(Inst.getOperand(1)));
- emitToStreamer(Out, MCInstBuilder(RISCV::VMANDNOT_MM)
+ emitToStreamer(Out, MCInstBuilder(RISCV::VMANDN_MM)
.addOperand(Inst.getOperand(0))
.addOperand(Inst.getOperand(0))
.addReg(RISCV::V0));
}
bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
MVT Src1VT = Src1.getSimpleValueType();
- unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
+ unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode;
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
llvm_unreachable("Unexpected LMUL!");
case RISCVII::VLMUL::LMUL_F8:
VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
- VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
+ VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF8;
break;
case RISCVII::VLMUL::LMUL_F4:
VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
- VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
+ VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF4;
break;
case RISCVII::VLMUL::LMUL_F2:
VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
- VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
+ VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF2;
break;
case RISCVII::VLMUL::LMUL_1:
VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
- VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
+ VMANDNOpcode = RISCV::PseudoVMANDN_MM_M1;
break;
case RISCVII::VLMUL::LMUL_2:
VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
- VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
+ VMANDNOpcode = RISCV::PseudoVMANDN_MM_M2;
break;
case RISCVII::VLMUL::LMUL_4:
VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
- VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
+ VMANDNOpcode = RISCV::PseudoVMANDN_MM_M4;
break;
case RISCVII::VLMUL::LMUL_8:
VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
- VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
+ VMANDNOpcode = RISCV::PseudoVMANDN_MM_M8;
break;
}
SDValue SEW = CurDAG->getTargetConstant(
SDValue MaskedOff = Node->getOperand(1);
SDValue Mask = Node->getOperand(4);
// If the MaskedOff value and the Mask are the same value use
- // vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt
+ // vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
// This avoids needing to copy v0 to vd before starting the next sequence.
if (Mask == MaskedOff) {
SDValue Cmp = SDValue(
CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
0);
- ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
+ ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
{Mask, Cmp, VL, MaskSEW}));
return;
}
llvm_unreachable("Unhandled reduction");
case ISD::VECREDUCE_AND:
case ISD::VP_REDUCE_AND: {
- // vpopc ~x == 0
+ // vcpop ~x == 0
SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
- Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
+ Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
CC = ISD::SETEQ;
BaseOpc = ISD::AND;
break;
}
case ISD::VECREDUCE_OR:
case ISD::VP_REDUCE_OR:
- // vpopc x != 0
- Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
+ // vcpop x != 0
+ Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
CC = ISD::SETNE;
BaseOpc = ISD::OR;
break;
case ISD::VECREDUCE_XOR:
case ISD::VP_REDUCE_XOR: {
- // ((vpopc x) & 1) != 0
+ // ((vcpop x) & 1) != 0
SDValue One = DAG.getConstant(1, DL, XLenVT);
- Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
+ Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL);
Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
CC = ISD::SETNE;
BaseOpc = ISD::XOR;
// Now include the start value in the operation.
// Note that we must return the start value when no elements are operated
- // upon. The vpopc instructions we've emitted in each case above will return
+ // upon. The vcpop instructions we've emitted in each case above will return
// 0 for an inactive vector, and so we've already received the neutral value:
// AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
// can simply include the start value.
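
For intuition, the identities these three lowerings rely on can be sketched in plain C over a scalar mask word (illustrative helpers only, not part of the patch; `active` stands in for the masked, VL-limited lanes):

#include <stdbool.h>
#include <stdint.h>

// AND-reduce: all active bits set  <=>  vcpop(~x) == 0
static bool reduce_and(uint64_t m, uint64_t active) {
  return __builtin_popcountll(~m & active) == 0;
}
// OR-reduce: some active bit set  <=>  vcpop(x) != 0
static bool reduce_or(uint64_t m, uint64_t active) {
  return __builtin_popcountll(m & active) != 0;
}
// XOR-reduce: odd number of active bits set  <=>  ((vcpop x) & 1) != 0
static bool reduce_xor(uint64_t m, uint64_t active) {
  return (__builtin_popcountll(m & active) & 1) != 0;
}

On an empty active set these return 1, 0, and 0 respectively — exactly the neutral values the comment above relies on.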
NODE_NAME_CASE(VRGATHEREI16_VV_VL)
NODE_NAME_CASE(VSEXT_VL)
NODE_NAME_CASE(VZEXT_VL)
- NODE_NAME_CASE(VPOPC_VL)
+ NODE_NAME_CASE(VCPOP_VL)
NODE_NAME_CASE(VLE_VL)
NODE_NAME_CASE(VSE_VL)
NODE_NAME_CASE(READ_CSR)
VSEXT_VL,
VZEXT_VL,
- // vpopc.m with additional mask and VL operands.
- VPOPC_VL,
+ // vcpop.m with additional mask and VL operands.
+ VCPOP_VL,
// Reads value of CSR.
// The first operand is a chain pointer. The second specifies address of the
let RVVConstraint = NoConstraint in {
defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">;
-defm VMANDNOT_M : VMALU_MV_Mask<"vmandnot", 0b011000, "m">;
+defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">;
defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">;
-defm VMORNOT_M : VMALU_MV_Mask<"vmornot", 0b011100, "m">;
+defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">;
defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">;
}
def : InstAlias<"vmnot.m $vd, $vs",
(VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;
+def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1",
+ (VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
+def : InstAlias<"vmornot.mm $vd, $vs2, $vs1",
+ (VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
+
let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
RVVConstraint = NoConstraint in {
-// Vector mask population count vpopc
-def VPOPC_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
+// Vector count population in mask vcpop.m
+def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
(ins VR:$vs2, VMaskOp:$vm),
- "vpopc.m", "$vd, $vs2$vm">,
+ "vcpop.m", "$vd, $vs2$vm">,
Sched<[WriteVMPopV, ReadVMPopV, ReadVMask]>;
// vfirst find-first-set mask bit
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
+def : InstAlias<"vpopc.m $vd, $vs2$vm",
+ (VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>;
+
let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {
// vmsbf.m set-before-first mask bit
defm PseudoVMAND: VPseudoBinaryM_MM;
defm PseudoVMNAND: VPseudoBinaryM_MM;
-defm PseudoVMANDNOT: VPseudoBinaryM_MM;
+defm PseudoVMANDN: VPseudoBinaryM_MM;
defm PseudoVMXOR: VPseudoBinaryM_MM;
defm PseudoVMOR: VPseudoBinaryM_MM;
defm PseudoVMNOR: VPseudoBinaryM_MM;
-defm PseudoVMORNOT: VPseudoBinaryM_MM;
+defm PseudoVMORN: VPseudoBinaryM_MM;
defm PseudoVMXNOR: VPseudoBinaryM_MM;
// Pseudo instructions
defm PseudoVMSET : VPseudoNullaryPseudoM<"VMXNOR">;
//===----------------------------------------------------------------------===//
-// 16.2. Vector mask population count vpopc
+// 16.2. Vector count population in mask vcpop.m
//===----------------------------------------------------------------------===//
-defm PseudoVPOPC: VPseudoUnaryS_M;
+defm PseudoVCPOP: VPseudoUnaryS_M;
//===----------------------------------------------------------------------===//
// 16.3. vfirst find-first-set mask bit
//===----------------------------------------------------------------------===//
defm : VPatBinaryM_MM<"int_riscv_vmand", "PseudoVMAND">;
defm : VPatBinaryM_MM<"int_riscv_vmnand", "PseudoVMNAND">;
-defm : VPatBinaryM_MM<"int_riscv_vmandnot", "PseudoVMANDNOT">;
+defm : VPatBinaryM_MM<"int_riscv_vmandn", "PseudoVMANDN">;
defm : VPatBinaryM_MM<"int_riscv_vmxor", "PseudoVMXOR">;
defm : VPatBinaryM_MM<"int_riscv_vmor", "PseudoVMOR">;
defm : VPatBinaryM_MM<"int_riscv_vmnor", "PseudoVMNOR">;
-defm : VPatBinaryM_MM<"int_riscv_vmornot", "PseudoVMORNOT">;
+defm : VPatBinaryM_MM<"int_riscv_vmorn", "PseudoVMORN">;
defm : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
// pseudo instructions
defm : VPatNullaryM<"int_riscv_vmset", "PseudoVMSET">;
//===----------------------------------------------------------------------===//
-// 16.2. Vector mask population count vpopc
+// 16.2. Vector count population in mask vcpop.m
//===----------------------------------------------------------------------===//
-defm : VPatUnaryS_M<"int_riscv_vpopc", "PseudoVPOPC">;
+defm : VPatUnaryS_M<"int_riscv_vcpop", "PseudoVCPOP">;
//===----------------------------------------------------------------------===//
// 16.3. vfirst find-first-set mask bit
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (and VR:$rs1, (rvv_vnot VR:$rs2))),
- (!cast<Instruction>("PseudoVMANDNOT_MM_"#mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMANDN_MM_"#mti.LMul.MX)
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
def : Pat<(mti.Mask (or VR:$rs1, (rvv_vnot VR:$rs2))),
- (!cast<Instruction>("PseudoVMORNOT_MM_"#mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMORN_MM_"#mti.LMul.MX)
VR:$rs1, VR:$rs2, mti.AVL, mti.Log2SEW)>;
// Handle rvv_vnot the same as the vmnot.m pseudoinstruction.
def riscv_vmnot_vl : PatFrag<(ops node:$rs, node:$vl),
(riscv_vmxor_vl node:$rs, true_mask, node:$vl)>;
-def riscv_vpopc_vl : SDNode<"RISCVISD::VPOPC_VL",
+def riscv_vcpop_vl : SDNode<"RISCVISD::VCPOP_VL",
SDTypeProfile<1, 3, [SDTCisVT<0, XLenVT>,
SDTCisVec<1>, SDTCisInt<1>,
SDTCVecEltisVT<2, i1>,
def : Pat<(mti.Mask (riscv_vmand_vl VR:$rs1,
(riscv_vmnot_vl VR:$rs2, VLOpFrag),
VLOpFrag)),
- (!cast<Instruction>("PseudoVMANDNOT_MM_" # mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMANDN_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
def : Pat<(mti.Mask (riscv_vmor_vl VR:$rs1,
(riscv_vmnot_vl VR:$rs2, VLOpFrag),
VLOpFrag)),
- (!cast<Instruction>("PseudoVMORNOT_MM_" # mti.LMul.MX)
+ (!cast<Instruction>("PseudoVMORN_MM_" # mti.LMul.MX)
VR:$rs1, VR:$rs2, GPR:$vl, mti.Log2SEW)>;
// XOR is associative so we need 2 patterns for VMXNOR.
def : Pat<(mti.Mask (riscv_vmxor_vl (riscv_vmnot_vl VR:$rs1,
(!cast<Instruction>("PseudoVMNAND_MM_" # mti.LMul.MX)
VR:$rs, VR:$rs, GPR:$vl, mti.Log2SEW)>;
- // 16.2 Vector Mask Population Count vpopc
- def : Pat<(XLenVT (riscv_vpopc_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
+ // 16.2 Vector count population in mask vcpop.m
+ def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask true_mask),
VLOpFrag)),
- (!cast<Instruction>("PseudoVPOPC_M_" # mti.BX)
+ (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX)
VR:$rs2, GPR:$vl, mti.Log2SEW)>;
- def : Pat<(XLenVT (riscv_vpopc_vl (mti.Mask VR:$rs2), (mti.Mask V0),
+ def : Pat<(XLenVT (riscv_vcpop_vl (mti.Mask VR:$rs2), (mti.Mask V0),
VLOpFrag)),
- (!cast<Instruction>("PseudoVPOPC_M_" # mti.BX # "_MASK")
+ (!cast<Instruction>("PseudoVCPOP_M_" # mti.BX # "_MASK")
VR:$rs2, (mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
}
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
-; CHECK-NEXT: vmandnot.mm v8, v9, v8
+; CHECK-NEXT: vmandn.mm v8, v9, v8
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i1>, <8 x i1>* %x
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
-; CHECK-NEXT: vmornot.mm v8, v9, v8
+; CHECK-NEXT: vmorn.mm v8, v9, v8
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i1>, <16 x i1>* %x
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
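
The blocks above and below come from selects with a scalar i1 condition: the condition is splatted with vmv.v.x, turned back into a mask by vmsne.vi, and the select is then computed as (cond & a) | (~cond & b), i.e. one vmand.mm, one vmandn.mm and one vmor.mm. A sketch of the source shape, with hypothetical names:

define <8 x i1> @select_scalar_cond_sketch(i1 zeroext %c, <8 x i1> %a, <8 x i1> %b) {
  ; splat %c to a mask, then (%c & %a) | (~%c & %b) via vmand/vmandn/vmor
  %r = select i1 %c, <8 x i1> %a, <8 x i1> %b
  ret <8 x i1> %r
}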
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_or_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_xor_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
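
The vreduce_* checks all follow one idiom built on the renamed vcpop.m: an OR-reduction of a mask tests for a nonzero count (vcpop + snez), an XOR-reduction takes the count's low bit (vcpop + andi 1), and an AND-reduction complements the mask first (vmnand of the mask with itself) and tests for a zero count (vcpop + seqz); the trailing neg sign-extends the i1 result. The earlier blocks that use vcpop.m with v0.t are the vector-predicated counterparts, where the count runs under a control mask and the i1 result is merged with a scalar start value via and/or/xor, and the LMULMAX1/LMULMAX8 variants further down repeat the idiom at larger fixed sizes, either combining LMUL=1 chunks with vmor/vmxor/vmand first or issuing a single higher-LMUL vcpop.m. A sketch of the generic IR behind these tests (assuming the standard reduction intrinsics; hypothetical function name):

declare i1 @llvm.vector.reduce.and.v4i1(<4 x i1>)

define i1 @reduce_and_sketch(<4 x i1> %m) {
  ; true iff every lane is set, i.e. vcpop(~%m) == 0
  %r = call i1 @llvm.vector.reduce.and.v4i1(<4 x i1> %m)
  ret i1 %r
}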
; CHECK-LABEL: vreduce_or_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_xor_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_or_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_xor_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_or_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_xor_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vmor.mm v8, v0, v8
-; LMULMAX1-NEXT: vpopc.m a0, v8
+; LMULMAX1-NEXT: vcpop.m a0, v8
; LMULMAX1-NEXT: snez a0, a0
; LMULMAX1-NEXT: neg a0, a0
; LMULMAX1-NEXT: ret
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi a0, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; LMULMAX8-NEXT: vpopc.m a0, v0
+; LMULMAX8-NEXT: vcpop.m a0, v0
; LMULMAX8-NEXT: snez a0, a0
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8-NEXT: ret
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vmxor.mm v8, v0, v8
-; LMULMAX1-NEXT: vpopc.m a0, v8
+; LMULMAX1-NEXT: vcpop.m a0, v8
; LMULMAX1-NEXT: andi a0, a0, 1
; LMULMAX1-NEXT: neg a0, a0
; LMULMAX1-NEXT: ret
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi a0, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; LMULMAX8-NEXT: vpopc.m a0, v0
+; LMULMAX8-NEXT: vcpop.m a0, v0
; LMULMAX8-NEXT: andi a0, a0, 1
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8-NEXT: ret
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vmnand.mm v8, v0, v8
-; LMULMAX1-NEXT: vpopc.m a0, v8
+; LMULMAX1-NEXT: vcpop.m a0, v8
; LMULMAX1-NEXT: seqz a0, a0
; LMULMAX1-NEXT: neg a0, a0
; LMULMAX1-NEXT: ret
; LMULMAX8-NEXT: addi a0, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; LMULMAX8-NEXT: vmnand.mm v8, v0, v0
-; LMULMAX8-NEXT: vpopc.m a0, v8
+; LMULMAX8-NEXT: vcpop.m a0, v8
; LMULMAX8-NEXT: seqz a0, a0
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8-NEXT: ret
; LMULMAX1-NEXT: vmor.mm v8, v8, v10
; LMULMAX1-NEXT: vmor.mm v9, v0, v9
; LMULMAX1-NEXT: vmor.mm v8, v9, v8
-; LMULMAX1-NEXT: vpopc.m a0, v8
+; LMULMAX1-NEXT: vcpop.m a0, v8
; LMULMAX1-NEXT: snez a0, a0
; LMULMAX1-NEXT: neg a0, a0
; LMULMAX1-NEXT: ret
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi a0, zero, 64
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; LMULMAX8-NEXT: vpopc.m a0, v0
+; LMULMAX8-NEXT: vcpop.m a0, v0
; LMULMAX8-NEXT: snez a0, a0
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8-NEXT: ret
; LMULMAX1-NEXT: vmxor.mm v8, v8, v10
; LMULMAX1-NEXT: vmxor.mm v9, v0, v9
; LMULMAX1-NEXT: vmxor.mm v8, v9, v8
-; LMULMAX1-NEXT: vpopc.m a0, v8
+; LMULMAX1-NEXT: vcpop.m a0, v8
; LMULMAX1-NEXT: andi a0, a0, 1
; LMULMAX1-NEXT: neg a0, a0
; LMULMAX1-NEXT: ret
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi a0, zero, 64
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; LMULMAX8-NEXT: vpopc.m a0, v0
+; LMULMAX8-NEXT: vcpop.m a0, v0
; LMULMAX8-NEXT: andi a0, a0, 1
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8-NEXT: ret
; LMULMAX1-NEXT: vmand.mm v8, v8, v10
; LMULMAX1-NEXT: vmand.mm v9, v0, v9
; LMULMAX1-NEXT: vmnand.mm v8, v9, v8
-; LMULMAX1-NEXT: vpopc.m a0, v8
+; LMULMAX1-NEXT: vcpop.m a0, v8
; LMULMAX1-NEXT: seqz a0, a0
; LMULMAX1-NEXT: neg a0, a0
; LMULMAX1-NEXT: ret
; LMULMAX8-NEXT: addi a0, zero, 64
; LMULMAX8-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; LMULMAX8-NEXT: vmnand.mm v8, v0, v0
-; LMULMAX8-NEXT: vpopc.m a0, v8
+; LMULMAX8-NEXT: vcpop.m a0, v8
; LMULMAX8-NEXT: seqz a0, a0
; LMULMAX8-NEXT: neg a0, a0
; LMULMAX8-NEXT: ret
; CHECK-LABEL: vselect_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
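
The vselect_* tests are the per-lane variant: the condition is already a mask vector, so no splat is needed and the select lowers directly to the same vmandn/vmand/vmor triple. Sketch (hypothetical name):

define <2 x i1> @vselect_sketch(<2 x i1> %c, <2 x i1> %a, <2 x i1> %b) {
  ; per-lane select between mask vectors: (%c & %a) | (~%c & %b)
  %r = select <2 x i1> %c, <2 x i1> %a, <2 x i1> %b
  ret <2 x i1> %r
}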
; CHECK-LABEL: vselect_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-LABEL: vselect_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-LABEL: vselect_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 32
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 64
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vi v9, v9, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmv.v.x v10, a0
; CHECK-NEXT: vmsne.vi v9, v10, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT: vmv.v.x v10, a0
; CHECK-NEXT: vmsne.vi v9, v10, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vmsne.vi v9, v12, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: vmsne.vi v9, v12, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vmsne.vi v9, v16, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vmv.v.x v16, a0
; CHECK-NEXT: vmsne.vi v9, v16, 0
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
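
Next comes the same rename in the intrinsic tests: llvm.riscv.vpopc.* becomes llvm.riscv.vcpop.* with an unchanged shape, taking a mask and a VL and returning the number of set mask bits among the first VL lanes as an XLEN-sized integer. In the masked variants the control mask must sit in v0, which is what the vmv1r.v shuffles in the generated code arrange. Usage sketch (hypothetical function, declaration as in the tests that follow):

declare i32 @llvm.riscv.vcpop.i32.nxv4i1(<vscale x 4 x i1>, i32)

define i32 @count_active(<vscale x 4 x i1> %m, i32 %vl) {
  ; count of set bits in %m among the first %vl lanes
  %n = call i32 @llvm.riscv.vcpop.i32.nxv4i1(<vscale x 4 x i1> %m, i32 %vl)
  ret i32 %n
}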
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: < %s | FileCheck %s
-declare i32 @llvm.riscv.vpopc.i32.nxv1i1(
+declare i32 @llvm.riscv.vcpop.i32.nxv1i1(
<vscale x 1 x i1>,
i32);
-define i32 @intrinsic_vpopc_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv1i1:
+define i32 @intrinsic_vcpop_m_i32_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.i32.nxv1i1(
+ %a = call i32 @llvm.riscv.vcpop.i32.nxv1i1(
<vscale x 1 x i1> %0,
i32 %1)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.mask.i32.nxv1i1(
+declare i32 @llvm.riscv.vcpop.mask.i32.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i32);
-define i32 @intrinsic_vpopc_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv1i1:
+define i32 @intrinsic_vcpop_mask_m_i32_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv1i1(
+ %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i32 %2)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.i32.nxv2i1(
+declare i32 @llvm.riscv.vcpop.i32.nxv2i1(
<vscale x 2 x i1>,
i32);
-define i32 @intrinsic_vpopc_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv2i1:
+define i32 @intrinsic_vcpop_m_i32_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.i32.nxv2i1(
+ %a = call i32 @llvm.riscv.vcpop.i32.nxv2i1(
<vscale x 2 x i1> %0,
i32 %1)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.mask.i32.nxv2i1(
+declare i32 @llvm.riscv.vcpop.mask.i32.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i32);
-define i32 @intrinsic_vpopc_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv2i1:
+define i32 @intrinsic_vcpop_mask_m_i32_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv2i1(
+ %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i32 %2)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.i32.nxv4i1(
+declare i32 @llvm.riscv.vcpop.i32.nxv4i1(
<vscale x 4 x i1>,
i32);
-define i32 @intrinsic_vpopc_m_i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv4i1:
+define i32 @intrinsic_vcpop_m_i32_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.i32.nxv4i1(
+ %a = call i32 @llvm.riscv.vcpop.i32.nxv4i1(
<vscale x 4 x i1> %0,
i32 %1)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.mask.i32.nxv4i1(
+declare i32 @llvm.riscv.vcpop.mask.i32.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i32);
-define i32 @intrinsic_vpopc_mask_m_i32_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv4i1:
+define i32 @intrinsic_vcpop_mask_m_i32_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv4i1(
+ %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i32 %2)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.i32.nxv8i1(
+declare i32 @llvm.riscv.vcpop.i32.nxv8i1(
<vscale x 8 x i1>,
i32);
-define i32 @intrinsic_vpopc_m_i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv8i1:
+define i32 @intrinsic_vcpop_m_i32_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.i32.nxv8i1(
+ %a = call i32 @llvm.riscv.vcpop.i32.nxv8i1(
<vscale x 8 x i1> %0,
i32 %1)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.mask.i32.nxv8i1(
+declare i32 @llvm.riscv.vcpop.mask.i32.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i32);
-define i32 @intrinsic_vpopc_mask_m_i32_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv8i1:
+define i32 @intrinsic_vcpop_mask_m_i32_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv8i1(
+ %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i32 %2)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.i32.nxv16i1(
+declare i32 @llvm.riscv.vcpop.i32.nxv16i1(
<vscale x 16 x i1>,
i32);
-define i32 @intrinsic_vpopc_m_i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv16i1:
+define i32 @intrinsic_vcpop_m_i32_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.i32.nxv16i1(
+ %a = call i32 @llvm.riscv.vcpop.i32.nxv16i1(
<vscale x 16 x i1> %0,
i32 %1)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.mask.i32.nxv16i1(
+declare i32 @llvm.riscv.vcpop.mask.i32.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i32);
-define i32 @intrinsic_vpopc_mask_m_i32_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv16i1:
+define i32 @intrinsic_vcpop_mask_m_i32_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv16i1(
+ %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i32 %2)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.i32.nxv32i1(
+declare i32 @llvm.riscv.vcpop.i32.nxv32i1(
<vscale x 32 x i1>,
i32);
-define i32 @intrinsic_vpopc_m_i32_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv32i1:
+define i32 @intrinsic_vcpop_m_i32_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.i32.nxv32i1(
+ %a = call i32 @llvm.riscv.vcpop.i32.nxv32i1(
<vscale x 32 x i1> %0,
i32 %1)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.mask.i32.nxv32i1(
+declare i32 @llvm.riscv.vcpop.mask.i32.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i32);
-define i32 @intrinsic_vpopc_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv32i1:
+define i32 @intrinsic_vcpop_mask_m_i32_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv32i1(
+ %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i32 %2)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.i32.nxv64i1(
+declare i32 @llvm.riscv.vcpop.i32.nxv64i1(
<vscale x 64 x i1>,
i32);
-define i32 @intrinsic_vpopc_m_i32_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i32_nxv64i1:
+define i32 @intrinsic_vcpop_m_i32_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i32_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.i32.nxv64i1(
+ %a = call i32 @llvm.riscv.vcpop.i32.nxv64i1(
<vscale x 64 x i1> %0,
i32 %1)
ret i32 %a
}
-declare i32 @llvm.riscv.vpopc.mask.i32.nxv64i1(
+declare i32 @llvm.riscv.vcpop.mask.i32.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i32);
-define i32 @intrinsic_vpopc_mask_m_i32_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i32_nxv64i1:
+define i32 @intrinsic_vcpop_mask_m_i32_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i32_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i32 @llvm.riscv.vpopc.mask.i32.nxv64i1(
+ %a = call i32 @llvm.riscv.vcpop.mask.i32.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i32 %2)
  ret i32 %a
}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: < %s | FileCheck %s
-declare i64 @llvm.riscv.vpopc.i64.nxv1i1(
+declare i64 @llvm.riscv.vcpop.i64.nxv1i1(
<vscale x 1 x i1>,
i64);
-define i64 @intrinsic_vpopc_m_i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv1i1:
+define i64 @intrinsic_vcpop_m_i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.i64.nxv1i1(
+ %a = call i64 @llvm.riscv.vcpop.i64.nxv1i1(
<vscale x 1 x i1> %0,
i64 %1)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.mask.i64.nxv1i1(
+declare i64 @llvm.riscv.vcpop.mask.i64.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i64);
-define i64 @intrinsic_vpopc_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv1i1:
+define i64 @intrinsic_vcpop_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv1i1(
+ %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i64 %2)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.i64.nxv2i1(
+declare i64 @llvm.riscv.vcpop.i64.nxv2i1(
<vscale x 2 x i1>,
i64);
-define i64 @intrinsic_vpopc_m_i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv2i1:
+define i64 @intrinsic_vcpop_m_i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.i64.nxv2i1(
+ %a = call i64 @llvm.riscv.vcpop.i64.nxv2i1(
<vscale x 2 x i1> %0,
i64 %1)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.mask.i64.nxv2i1(
+declare i64 @llvm.riscv.vcpop.mask.i64.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i64);
-define i64 @intrinsic_vpopc_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv2i1:
+define i64 @intrinsic_vcpop_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv2i1(
+ %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i64 %2)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.i64.nxv4i1(
+declare i64 @llvm.riscv.vcpop.i64.nxv4i1(
<vscale x 4 x i1>,
i64);
-define i64 @intrinsic_vpopc_m_i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv4i1:
+define i64 @intrinsic_vcpop_m_i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.i64.nxv4i1(
+ %a = call i64 @llvm.riscv.vcpop.i64.nxv4i1(
<vscale x 4 x i1> %0,
i64 %1)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.mask.i64.nxv4i1(
+declare i64 @llvm.riscv.vcpop.mask.i64.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i64);
-define i64 @intrinsic_vpopc_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv4i1:
+define i64 @intrinsic_vcpop_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv4i1(
+ %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i64 %2)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.i64.nxv8i1(
+declare i64 @llvm.riscv.vcpop.i64.nxv8i1(
<vscale x 8 x i1>,
i64);
-define i64 @intrinsic_vpopc_m_i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv8i1:
+define i64 @intrinsic_vcpop_m_i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.i64.nxv8i1(
+ %a = call i64 @llvm.riscv.vcpop.i64.nxv8i1(
<vscale x 8 x i1> %0,
i64 %1)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.mask.i64.nxv8i1(
+declare i64 @llvm.riscv.vcpop.mask.i64.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i64);
-define i64 @intrinsic_vpopc_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv8i1:
+define i64 @intrinsic_vcpop_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv8i1(
+ %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i64 %2)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.i64.nxv16i1(
+declare i64 @llvm.riscv.vcpop.i64.nxv16i1(
<vscale x 16 x i1>,
i64);
-define i64 @intrinsic_vpopc_m_i64_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv16i1:
+define i64 @intrinsic_vcpop_m_i64_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.i64.nxv16i1(
+ %a = call i64 @llvm.riscv.vcpop.i64.nxv16i1(
<vscale x 16 x i1> %0,
i64 %1)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.mask.i64.nxv16i1(
+declare i64 @llvm.riscv.vcpop.mask.i64.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i64);
-define i64 @intrinsic_vpopc_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv16i1:
+define i64 @intrinsic_vcpop_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv16i1(
+ %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i64 %2)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.i64.nxv32i1(
+declare i64 @llvm.riscv.vcpop.i64.nxv32i1(
<vscale x 32 x i1>,
i64);
-define i64 @intrinsic_vpopc_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv32i1:
+define i64 @intrinsic_vcpop_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.i64.nxv32i1(
+ %a = call i64 @llvm.riscv.vcpop.i64.nxv32i1(
<vscale x 32 x i1> %0,
i64 %1)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.mask.i64.nxv32i1(
+declare i64 @llvm.riscv.vcpop.mask.i64.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i64);
-define i64 @intrinsic_vpopc_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv32i1:
+define i64 @intrinsic_vcpop_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv32i1(
+ %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i64 %2)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.i64.nxv64i1(
+declare i64 @llvm.riscv.vcpop.i64.nxv64i1(
<vscale x 64 x i1>,
i64);
-define i64 @intrinsic_vpopc_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv64i1:
+define i64 @intrinsic_vcpop_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_m_i64_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.i64.nxv64i1(
+ %a = call i64 @llvm.riscv.vcpop.i64.nxv64i1(
<vscale x 64 x i1> %0,
i64 %1)
ret i64 %a
}
-declare i64 @llvm.riscv.vpopc.mask.i64.nxv64i1(
+declare i64 @llvm.riscv.vcpop.mask.i64.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i64);
-define i64 @intrinsic_vpopc_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv64i1:
+define i64 @intrinsic_vcpop_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vcpop_mask_m_i64_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a0, v9, v0.t
+; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
- %a = call i64 @llvm.riscv.vpopc.mask.i64.nxv64i1(
+ %a = call i64 @llvm.riscv.vcpop.mask.i64.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i64 %2)
  ret i64 %a
}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmandn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmandn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv2i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmandn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmandn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmandn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmandn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1(
+ <vscale x 64 x i1>,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i1> @intrinsic_vmandn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1(
+ <vscale x 64 x i1> %0,
+ <vscale x 64 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 64 x i1> %a
+}
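
For reference, vmandn.mm vd, vs2, vs1 computes vd = vs2 & ~vs1, so llvm.riscv.vmandn(op1, op2, vl) above returns op1 & ~op2 over the first vl lanes; vmorn is the analogous op1 | ~op2. A small usage sketch with a hypothetical helper:

declare <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i1> @clear_lanes(<vscale x 8 x i1> %keep, <vscale x 8 x i1> %drop, i32 %vl) {
  ; keep only lanes set in %keep and clear in %drop: %keep & ~%drop
  %r = call <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(<vscale x 8 x i1> %keep, <vscale x 8 x i1> %drop, i32 %vl)
  ret <vscale x 8 x i1> %r
}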
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmandn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv1i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmandn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv2i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmandn.nxv2i1(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmandn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv4i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmandn.nxv4i1(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmandn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmandn.nxv8i1(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmandn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmandn.nxv16i1(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmandn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv32i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmandn.nxv32i1(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1(
+ <vscale x 64 x i1>,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i1> @intrinsic_vmandn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vmandn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i1> @llvm.riscv.vmandn.nxv64i1(
+ <vscale x 64 x i1> %0,
+ <vscale x 64 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 64 x i1> %a
+}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
- <vscale x 1 x i1>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmandnot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv1i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i1> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
- <vscale x 2 x i1>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmandnot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv2i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i1> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
- <vscale x 4 x i1>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmandnot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv4i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i1> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
- <vscale x 8 x i1>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmandnot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv8i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i1> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
- <vscale x 16 x i1>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmandnot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv16i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i1> %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
- <vscale x 32 x i1>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmandnot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv32i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i1> %1,
- i32 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
- <vscale x 64 x i1>,
- <vscale x 64 x i1>,
- i32);
-
-define <vscale x 64 x i1> @intrinsic_vmandnot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv64i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
- <vscale x 64 x i1> %0,
- <vscale x 64 x i1> %1,
- i32 %2)
-
- ret <vscale x 64 x i1> %a
-}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
- <vscale x 1 x i1>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmandnot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv1i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmandnot.nxv1i1(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i1> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
- <vscale x 2 x i1>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmandnot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv2i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmandnot.nxv2i1(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i1> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
- <vscale x 4 x i1>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmandnot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv4i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmandnot.nxv4i1(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i1> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
- <vscale x 8 x i1>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmandnot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv8i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmandnot.nxv8i1(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i1> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
- <vscale x 16 x i1>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmandnot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv16i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmandnot.nxv16i1(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i1> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
- <vscale x 32 x i1>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmandnot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv32i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmandnot.nxv32i1(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i1> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
- <vscale x 64 x i1>,
- <vscale x 64 x i1>,
- i64);
-
-define <vscale x 64 x i1> @intrinsic_vmandnot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmandnot_mm_nxv64i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmandnot.nxv64i1(
- <vscale x 64 x i1> %0,
- <vscale x 64 x i1> %1,
- i64 %2)
-
- ret <vscale x 64 x i1> %a
-}
ret <vscale x 16 x i1> %not
}
-define <vscale x 1 x i1> @vmandnot_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
-; CHECK-LABEL: vmandnot_vv_nxv1i1:
+define <vscale x 1 x i1> @vmandn_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmandn_vv_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
  %not = xor <vscale x 1 x i1> %vb, %splat
  %vc = and <vscale x 1 x i1> %va, %not
  ret <vscale x 1 x i1> %vc
}
-define <vscale x 2 x i1> @vmandnot_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
-; CHECK-LABEL: vmandnot_vv_nxv2i1:
+define <vscale x 2 x i1> @vmandn_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmandn_vv_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %not = xor <vscale x 2 x i1> %vb, %splat
  %vc = and <vscale x 2 x i1> %va, %not
  ret <vscale x 2 x i1> %vc
}
-define <vscale x 4 x i1> @vmandnot_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
-; CHECK-LABEL: vmandnot_vv_nxv4i1:
+define <vscale x 4 x i1> @vmandn_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmandn_vv_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
  %not = xor <vscale x 4 x i1> %vb, %splat
  %vc = and <vscale x 4 x i1> %va, %not
  ret <vscale x 4 x i1> %vc
}
-define <vscale x 8 x i1> @vmandnot_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
-; CHECK-LABEL: vmandnot_vv_nxv8i1:
+define <vscale x 8 x i1> @vmandn_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmandn_vv_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i1> %vc
}
-define <vscale x 16 x i1> @vmandnot_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
-; CHECK-LABEL: vmandnot_vv_nxv16i1:
+define <vscale x 16 x i1> @vmandn_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmandn_vv_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
ret <vscale x 16 x i1> %vc
}
-define <vscale x 1 x i1> @vmornot_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
-; CHECK-LABEL: vmornot_vv_nxv1i1:
+define <vscale x 1 x i1> @vmorn_vv_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb) {
+; CHECK-LABEL: vmorn_vv_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
+; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
ret <vscale x 1 x i1> %vc
}
-define <vscale x 2 x i1> @vmornot_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
-; CHECK-LABEL: vmornot_vv_nxv2i1:
+define <vscale x 2 x i1> @vmorn_vv_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb) {
+; CHECK-LABEL: vmorn_vv_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
+; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
ret <vscale x 2 x i1> %vc
}
-define <vscale x 4 x i1> @vmornot_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
-; CHECK-LABEL: vmornot_vv_nxv4i1:
+define <vscale x 4 x i1> @vmorn_vv_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb) {
+; CHECK-LABEL: vmorn_vv_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
+; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
ret <vscale x 4 x i1> %vc
}
-define <vscale x 8 x i1> @vmornot_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
-; CHECK-LABEL: vmornot_vv_nxv8i1:
+define <vscale x 8 x i1> @vmorn_vv_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb) {
+; CHECK-LABEL: vmorn_vv_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
+; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
ret <vscale x 8 x i1> %vc
}
-define <vscale x 16 x i1> @vmornot_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
-; CHECK-LABEL: vmornot_vv_nxv16i1:
+define <vscale x 16 x i1> @vmorn_vv_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb) {
+; CHECK-LABEL: vmorn_vv_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
+; CHECK-NEXT: vmorn.mm v0, v0, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
%splat = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
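The codegen hunks above exercise the ISel pattern behind the renamed mnemonics: an and or or whose second operand is inverted through an all-ones splat folds into a single mask instruction (vmandn.mm computes vs2 & ~vs1, vmorn.mm computes vs2 | ~vs1). A minimal self-contained sketch of that shape, with an illustrative function name rather than one of the tests' own:

; Sketch: and(x, xor(y, splat true)) should select to vmandn.mm.
define <vscale x 1 x i1> @vmandn_sketch(<vscale x 1 x i1> %x, <vscale x 1 x i1> %y) {
  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
  %ones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
  %noty = xor <vscale x 1 x i1> %y, %ones
  %r = and <vscale x 1 x i1> %x, %noty
  ret <vscale x 1 x i1> %r
}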
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i1>,
+ i32);
+
+define <vscale x 1 x i1> @intrinsic_vmorn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i1>,
+ i32);
+
+define <vscale x 2 x i1> @intrinsic_vmorn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i1>,
+ i32);
+
+define <vscale x 4 x i1> @intrinsic_vmorn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i1>,
+ i32);
+
+define <vscale x 8 x i1> @intrinsic_vmorn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i1>,
+ i32);
+
+define <vscale x 16 x i1> @intrinsic_vmorn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i1>,
+ i32);
+
+define <vscale x 32 x i1> @intrinsic_vmorn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1(
+ <vscale x 64 x i1>,
+ <vscale x 64 x i1>,
+ i32);
+
+define <vscale x 64 x i1> @intrinsic_vmorn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1(
+ <vscale x 64 x i1> %0,
+ <vscale x 64 x i1> %1,
+ i32 %2)
+
+ ret <vscale x 64 x i1> %a
+}
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
+; RUN: < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(
+ <vscale x 1 x i1>,
+ <vscale x 1 x i1>,
+ i64);
+
+define <vscale x 1 x i1> @intrinsic_vmorn_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv1i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1(
+ <vscale x 2 x i1>,
+ <vscale x 2 x i1>,
+ i64);
+
+define <vscale x 2 x i1> @intrinsic_vmorn_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv2i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmorn.nxv2i1(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1(
+ <vscale x 4 x i1>,
+ <vscale x 4 x i1>,
+ i64);
+
+define <vscale x 4 x i1> @intrinsic_vmorn_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv4i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmorn.nxv4i1(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1(
+ <vscale x 8 x i1>,
+ <vscale x 8 x i1>,
+ i64);
+
+define <vscale x 8 x i1> @intrinsic_vmorn_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv8i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 8 x i1> @llvm.riscv.vmorn.nxv8i1(
+ <vscale x 8 x i1> %0,
+ <vscale x 8 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1(
+ <vscale x 16 x i1>,
+ <vscale x 16 x i1>,
+ i64);
+
+define <vscale x 16 x i1> @intrinsic_vmorn_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv16i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 16 x i1> @llvm.riscv.vmorn.nxv16i1(
+ <vscale x 16 x i1> %0,
+ <vscale x 16 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1(
+ <vscale x 32 x i1>,
+ <vscale x 32 x i1>,
+ i64);
+
+define <vscale x 32 x i1> @intrinsic_vmorn_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv32i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 32 x i1> @llvm.riscv.vmorn.nxv32i1(
+ <vscale x 32 x i1> %0,
+ <vscale x 32 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1(
+ <vscale x 64 x i1>,
+ <vscale x 64 x i1>,
+ i64);
+
+define <vscale x 64 x i1> @intrinsic_vmorn_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmorn_mm_nxv64i1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vmorn.mm v0, v0, v8
+; CHECK-NEXT: ret
+entry:
+ %a = call <vscale x 64 x i1> @llvm.riscv.vmorn.nxv64i1(
+ <vscale x 64 x i1> %0,
+ <vscale x 64 x i1> %1,
+ i64 %2)
+
+ ret <vscale x 64 x i1> %a
+}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
- <vscale x 1 x i1>,
- <vscale x 1 x i1>,
- i32);
-
-define <vscale x 1 x i1> @intrinsic_vmornot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv1i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i1> %1,
- i32 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
- <vscale x 2 x i1>,
- <vscale x 2 x i1>,
- i32);
-
-define <vscale x 2 x i1> @intrinsic_vmornot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv2i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i1> %1,
- i32 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
- <vscale x 4 x i1>,
- <vscale x 4 x i1>,
- i32);
-
-define <vscale x 4 x i1> @intrinsic_vmornot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv4i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i1> %1,
- i32 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
- <vscale x 8 x i1>,
- <vscale x 8 x i1>,
- i32);
-
-define <vscale x 8 x i1> @intrinsic_vmornot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv8i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i1> %1,
- i32 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
- <vscale x 16 x i1>,
- <vscale x 16 x i1>,
- i32);
-
-define <vscale x 16 x i1> @intrinsic_vmornot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv16i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i1> %1,
- i32 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
- <vscale x 32 x i1>,
- <vscale x 32 x i1>,
- i32);
-
-define <vscale x 32 x i1> @intrinsic_vmornot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv32i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i1> %1,
- i32 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
- <vscale x 64 x i1>,
- <vscale x 64 x i1>,
- i32);
-
-define <vscale x 64 x i1> @intrinsic_vmornot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv64i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
- <vscale x 64 x i1> %0,
- <vscale x 64 x i1> %1,
- i32 %2)
-
- ret <vscale x 64 x i1> %a
-}
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-declare <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
- <vscale x 1 x i1>,
- <vscale x 1 x i1>,
- i64);
-
-define <vscale x 1 x i1> @intrinsic_vmornot_mm_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv1i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i1> @llvm.riscv.vmornot.nxv1i1(
- <vscale x 1 x i1> %0,
- <vscale x 1 x i1> %1,
- i64 %2)
-
- ret <vscale x 1 x i1> %a
-}
-
-declare <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
- <vscale x 2 x i1>,
- <vscale x 2 x i1>,
- i64);
-
-define <vscale x 2 x i1> @intrinsic_vmornot_mm_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv2i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i1> @llvm.riscv.vmornot.nxv2i1(
- <vscale x 2 x i1> %0,
- <vscale x 2 x i1> %1,
- i64 %2)
-
- ret <vscale x 2 x i1> %a
-}
-
-declare <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
- <vscale x 4 x i1>,
- <vscale x 4 x i1>,
- i64);
-
-define <vscale x 4 x i1> @intrinsic_vmornot_mm_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv4i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i1> @llvm.riscv.vmornot.nxv4i1(
- <vscale x 4 x i1> %0,
- <vscale x 4 x i1> %1,
- i64 %2)
-
- ret <vscale x 4 x i1> %a
-}
-
-declare <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
- <vscale x 8 x i1>,
- <vscale x 8 x i1>,
- i64);
-
-define <vscale x 8 x i1> @intrinsic_vmornot_mm_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv8i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i1> @llvm.riscv.vmornot.nxv8i1(
- <vscale x 8 x i1> %0,
- <vscale x 8 x i1> %1,
- i64 %2)
-
- ret <vscale x 8 x i1> %a
-}
-
-declare <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
- <vscale x 16 x i1>,
- <vscale x 16 x i1>,
- i64);
-
-define <vscale x 16 x i1> @intrinsic_vmornot_mm_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv16i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i1> @llvm.riscv.vmornot.nxv16i1(
- <vscale x 16 x i1> %0,
- <vscale x 16 x i1> %1,
- i64 %2)
-
- ret <vscale x 16 x i1> %a
-}
-
-declare <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
- <vscale x 32 x i1>,
- <vscale x 32 x i1>,
- i64);
-
-define <vscale x 32 x i1> @intrinsic_vmornot_mm_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv32i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i1> @llvm.riscv.vmornot.nxv32i1(
- <vscale x 32 x i1> %0,
- <vscale x 32 x i1> %1,
- i64 %2)
-
- ret <vscale x 32 x i1> %a
-}
-
-declare <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
- <vscale x 64 x i1>,
- <vscale x 64 x i1>,
- i64);
-
-define <vscale x 64 x i1> @intrinsic_vmornot_mm_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vmornot_mm_nxv64i1:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vmornot.mm v0, v0, v8
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i1> @llvm.riscv.vmornot.nxv64i1(
- <vscale x 64 x i1> %0,
- <vscale x 64 x i1> %1,
- i64 %2)
-
- ret <vscale x 64 x i1> %a
-}
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vmslt.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vmslt.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v8, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v8
+; CHECK-NEXT: vmandn.mm v0, v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v10
+; CHECK-NEXT: vmandn.mm v0, v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v12, v8, a0
-; CHECK-NEXT: vmandnot.mm v0, v0, v12
+; CHECK-NEXT: vmandn.mm v0, v0, v12
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
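Every vmsge and vmsgeu hunk above is a masked form with no native encoding: codegen emits the inverse strictly-less-than compare and then clears those bits out of the mask, so the rename surfaces as vmandn.mm (previously vmandnot.mm) in each CHECK line. A hedged IR sketch of one such call, assuming the usual (maskedoff, op1, op2, mask, vl) operand order for the masked compare intrinsics:

declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
  <vscale x 1 x i1>, <vscale x 1 x i8>, i8, <vscale x 1 x i1>, i64)

; Sketch (illustrative name): expected to lower to vmslt.vx followed by vmandn.mm.
define <vscale x 1 x i1> @vmsge_mask_sketch(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, i8 %op2, i64 %vl) {
  %r = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
      <vscale x 1 x i1> %mask, <vscale x 1 x i8> %op1, i8 %op2, <vscale x 1 x i1> %mask, i64 %vl)
  ret <vscale x 1 x i1> %r
}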
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmnand.mm v9, v0, v0
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v8
-; CHECK-NEXT: vpopc.m a1, v9, v0.t
+; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: xor a0, a1, a0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-LABEL: vreduce_or_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_xor_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_or_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_xor_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_or_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_xor_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_or_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_xor_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_or_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_xor_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_or_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_xor_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_or_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: snez a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK-LABEL: vreduce_xor_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
-; CHECK-NEXT: vpopc.m a0, v0
+; CHECK-NEXT: vcpop.m a0, v0
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vmnand.mm v8, v0, v0
-; CHECK-NEXT: vpopc.m a0, v8
+; CHECK-NEXT: vcpop.m a0, v8
; CHECK-NEXT: seqz a0, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
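All of the i1 reduction hunks above funnel through the renamed vcpop.m: an or-reduction checks vcpop != 0 (snez), an xor-reduction keeps the low bit of the count (andi 1), and an and-reduction complements the mask with vmnand.mm and checks the count is zero (seqz). A minimal sketch using the generic reduction intrinsic, with an illustrative function name:

declare i1 @llvm.vector.reduce.or.nxv1i1(<vscale x 1 x i1>)

; Sketch: an i1 or-reduction compiles to vcpop.m plus snez.
define i1 @vreduce_or_sketch(<vscale x 1 x i1> %m) {
  %r = call i1 @llvm.vector.reduce.or.nxv1i1(<vscale x 1 x i1> %m)
  ret i1 %r
}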
; CHECK-LABEL: vselect_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-LABEL: vselect_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-LABEL: vselect_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-LABEL: vselect_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-LABEL: vselect_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-LABEL: vselect_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
; CHECK-LABEL: vselect_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
-; CHECK-NEXT: vmandnot.mm v8, v8, v9
+; CHECK-NEXT: vmandn.mm v8, v8, v9
; CHECK-NEXT: vmand.mm v9, v0, v9
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
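The vselect hunks above lower a select between two mask vectors through the identity (cond & a) | (~cond & b), which is where the renamed vmandn.mm appears next to vmand.mm and vmor.mm. A minimal sketch (illustrative name):

; Sketch: a mask-typed select lowers to vmandn.mm + vmand.mm + vmor.mm.
define <vscale x 1 x i1> @vselect_sketch(<vscale x 1 x i1> %cond, <vscale x 1 x i1> %a, <vscale x 1 x i1> %b) {
  %r = select <vscale x 1 x i1> %cond, <vscale x 1 x i1> %a, <vscale x 1 x i1> %b
  ret <vscale x 1 x i1> %r
}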
ret <vscale x 1 x i64> %d
}
- define void @vsetvli_vpopc() {
+ define void @vsetvli_vcpop() {
ret void
}
...
---
-name: vsetvli_vpopc
+name: vsetvli_vcpop
tracksRegLiveness: true
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 10, class: gpr, preferred-register: '' }
- { id: 11, class: vr, preferred-register: '' }
body: |
- ; CHECK-LABEL: name: vsetvli_vpopc
+ ; CHECK-LABEL: name: vsetvli_vcpop
; CHECK: bb.0:
; CHECK-NEXT: successors: %bb.1(0x80000000)
; CHECK-NEXT: liveins: $x10, $x11
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 23, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: [[PseudoVLE32_V_MF2_MASK:%[0-9]+]]:vrnov0 = PseudoVLE32_V_MF2_MASK [[PseudoVMV_V_I_MF2_]], killed [[COPY]], $v0, -1, 5, 0, implicit $vl, implicit $vtype
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 69, implicit-def $vl, implicit-def $vtype, implicit $vl
- ; CHECK-NEXT: [[PseudoVPOPC_M_B1_:%[0-9]+]]:gpr = PseudoVPOPC_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: [[PseudoVCPOP_M_B1_:%[0-9]+]]:gpr = PseudoVCPOP_M_B1 [[PseudoVMSEQ_VI_MF2_]], -1, 0, implicit $vl, implicit $vtype
; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x0
- ; CHECK-NEXT: BEQ killed [[PseudoVPOPC_M_B1_]], [[COPY2]], %bb.3
+ ; CHECK-NEXT: BEQ killed [[PseudoVCPOP_M_B1_]], [[COPY2]], %bb.3
; CHECK-NEXT: PseudoBR %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2:
%5:vmv0 = PseudoVMSEQ_VI_MF2 killed %3, 0, -1, 5
$v0 = COPY %5
%6:vrnov0 = PseudoVLE32_V_MF2_MASK %4, killed %0, $v0, -1, 5, 0
- %7:gpr = PseudoVPOPC_M_B1 %5, -1, 0
+ %7:gpr = PseudoVCPOP_M_B1 %5, -1, 0
%8:gpr = COPY $x0
BEQ killed %7, %8, %bb.3
PseudoBR %bb.2
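The MIR update above tracks the matching pseudo-instruction rename (PseudoVPOPC_M_B1 becomes PseudoVCPOP_M_B1); at the IR level the same operation is reached through the renamed llvm.riscv.vcpop intrinsic. A minimal sketch with an illustrative function name:

declare i64 @llvm.riscv.vcpop.nxv64i1.i64(<vscale x 64 x i1>, i64)

; Sketch: selects to a PseudoVCPOP_M_* pseudo and emits vcpop.m.
define i64 @vcpop_sketch(<vscale x 64 x i1> %m, i64 %vl) {
  %n = call i64 @llvm.riscv.vcpop.nxv64i1.i64(<vscale x 64 x i1> %m, i64 %vl)
  ret i64 %n
}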
# ALIAS: vfwredusum.vs v8, v4, v20, v0.t # encoding: [0x57,0x14,0x4a,0xc4]
# NO-ALIAS: vfwredusum.vs v8, v4, v20, v0.t # encoding: [0x57,0x14,0x4a,0xc4]
vfwredsum.vs v8, v4, v20, v0.t
+# ALIAS: vcpop.m a2, v4, v0.t # encoding: [0x57,0x26,0x48,0x40]
+# NO-ALIAS: vcpop.m a2, v4, v0.t # encoding: [0x57,0x26,0x48,0x40]
+vpopc.m a2, v4, v0.t
+# ALIAS: vmandn.mm v8, v4, v20 # encoding: [0x57,0x24,0x4a,0x62]
+# NO-ALIAS: vmandn.mm v8, v4, v20 # encoding: [0x57,0x24,0x4a,0x62]
+vmandnot.mm v8, v4, v20
+# ALIAS: vmorn.mm v8, v4, v20 # encoding: [0x57,0x24,0x4a,0x72]
+# NO-ALIAS: vmorn.mm v8, v4, v20 # encoding: [0x57,0x24,0x4a,0x72]
+vmornot.mm v8, v4, v20
vmsgeu.vx v0, v4, a0, v0.t, v2
# CHECK-INST: vmsltu.vx v2, v4, a0, v0.t
-# CHECK-INST: vmandnot.mm v0, v0, v2
+# CHECK-INST: vmandn.mm v0, v0, v2
# CHECK-ENCODING: [0x57,0x41,0x45,0x68]
# CHECK-ENCODING: [0x57,0x20,0x01,0x62]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
vmsge.vx v0, v4, a0, v0.t, v2
# CHECK-INST: vmslt.vx v2, v4, a0, v0.t
-# CHECK-INST: vmandnot.mm v0, v0, v2
+# CHECK-INST: vmandn.mm v0, v0, v2
# CHECK-ENCODING: [0x57,0x41,0x45,0x6c]
# CHECK-ENCODING: [0x57,0x20,0x01,0x62]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
vmsgeu.vx v9, v4, a0, v0.t, v2
# CHECK-INST: vmsltu.vx v2, v4, a0
-# CHECK-INST: vmandnot.mm v2, v0, v2
-# CHECK-INST: vmandnot.mm v9, v9, v0
+# CHECK-INST: vmandn.mm v2, v0, v2
+# CHECK-INST: vmandn.mm v9, v9, v0
# CHECK-INST: vmor.mm v9, v2, v9
# CHECK-ENCODING: [0x57,0x41,0x45,0x6a]
# CHECK-ENCODING: [0x57,0x21,0x01,0x62]
vmsge.vx v8, v4, a0, v0.t, v2
# CHECK-INST: vmslt.vx v2, v4, a0
-# CHECK-INST: vmandnot.mm v2, v0, v2
-# CHECK-INST: vmandnot.mm v8, v8, v0
+# CHECK-INST: vmandn.mm v2, v0, v2
+# CHECK-INST: vmandn.mm v8, v8, v0
# CHECK-INST: vmor.mm v8, v2, v8
# CHECK-ENCODING: [0x57,0x41,0x45,0x6e]
# CHECK-ENCODING: [0x57,0x21,0x01,0x62]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 24 4a 76 <unknown>
-vmandnot.mm v8, v4, v20
-# CHECK-INST: vmandnot.mm v8, v4, v20
+vmandn.mm v8, v4, v20
+# CHECK-INST: vmandn.mm v8, v4, v20
# CHECK-ENCODING: [0x57,0x24,0x4a,0x62]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 24 4a 62 <unknown>
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 24 4a 7a <unknown>
-vmornot.mm v8, v4, v20
-# CHECK-INST: vmornot.mm v8, v4, v20
+vmorn.mm v8, v4, v20
+# CHECK-INST: vmorn.mm v8, v4, v20
# CHECK-ENCODING: [0x57,0x24,0x4a,0x72]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 24 4a 72 <unknown>
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 24 4a 7e <unknown>
-vpopc.m a2, v4, v0.t
-# CHECK-INST: vpopc.m a2, v4, v0.t
+vcpop.m a2, v4, v0.t
+# CHECK-INST: vcpop.m a2, v4, v0.t
# CHECK-ENCODING: [0x57,0x26,0x48,0x40]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 26 48 40 <unknown>
-vpopc.m a2, v4
-# CHECK-INST: vpopc.m a2, v4
+vcpop.m a2, v4
+# CHECK-INST: vcpop.m a2, v4
# CHECK-ENCODING: [0x57,0x26,0x48,0x42]
# CHECK-ERROR: instruction requires the following: 'V' (Vector Instructions)
# CHECK-UNKNOWN: 57 26 48 42 <unknown>