switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
- case RISCVII::VLMUL::LMUL_F8:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
- break;
- case RISCVII::VLMUL::LMUL_F4:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
- break;
- case RISCVII::VLMUL::LMUL_F2:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
- break;
- case RISCVII::VLMUL::LMUL_1:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
- break;
- case RISCVII::VLMUL::LMUL_2:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
- break;
- case RISCVII::VLMUL::LMUL_4:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
- break;
- case RISCVII::VLMUL::LMUL_8:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
- VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
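+// Expands to a case that picks the unsigned or signed vmslt.vx pseudo (based
+// on IsUnsigned) and the matching vmnand.mm pseudo for one LMUL; e.g.
+// CASE_VMSLT_VMNAND_OPCODES(LMUL_1, M1) yields PseudoVMSLT(U)_VX_M1 and
+// PseudoVMNAND_MM_M1.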
+#define CASE_VMSLT_VMNAND_OPCODES(lmulenum, suffix) \
+ case RISCVII::VLMUL::lmulenum: \
+ VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
+ : RISCV::PseudoVMSLT_VX_##suffix; \
+ VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix; \
break;
+ CASE_VMSLT_VMNAND_OPCODES(LMUL_F8, MF8)
+ CASE_VMSLT_VMNAND_OPCODES(LMUL_F4, MF4)
+ CASE_VMSLT_VMNAND_OPCODES(LMUL_F2, MF2)
+ CASE_VMSLT_VMNAND_OPCODES(LMUL_1, M1)
+ CASE_VMSLT_VMNAND_OPCODES(LMUL_2, M2)
+ CASE_VMSLT_VMNAND_OPCODES(LMUL_4, M4)
+ CASE_VMSLT_VMNAND_OPCODES(LMUL_8, M8)
+#undef CASE_VMSLT_VMNAND_OPCODES
}
SDValue SEW = CurDAG->getTargetConstant(
Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
switch (RISCVTargetLowering::getLMUL(Src1VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
- case RISCVII::VLMUL::LMUL_F8:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
- : RISCV::PseudoVMSLT_VX_MF8_MASK;
- break;
- case RISCVII::VLMUL::LMUL_F4:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
- : RISCV::PseudoVMSLT_VX_MF4_MASK;
- break;
- case RISCVII::VLMUL::LMUL_F2:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
- : RISCV::PseudoVMSLT_VX_MF2_MASK;
- break;
- case RISCVII::VLMUL::LMUL_1:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
- : RISCV::PseudoVMSLT_VX_M1_MASK;
- break;
- case RISCVII::VLMUL::LMUL_2:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
- : RISCV::PseudoVMSLT_VX_M2_MASK;
- break;
- case RISCVII::VLMUL::LMUL_4:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
- : RISCV::PseudoVMSLT_VX_M4_MASK;
- break;
- case RISCVII::VLMUL::LMUL_8:
- VMSLTOpcode =
- IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
- VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
- : RISCV::PseudoVMSLT_VX_M8_MASK;
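+// Expands to a case that picks the unsigned or signed vmslt.vx pseudo, in
+// both its unmasked and _MASK forms, for one LMUL.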
+#define CASE_VMSLT_OPCODES(lmulenum, suffix) \
+ case RISCVII::VLMUL::lmulenum: \
+ VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix \
+ : RISCV::PseudoVMSLT_VX_##suffix; \
+ VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK \
+ : RISCV::PseudoVMSLT_VX_##suffix##_MASK; \
break;
+ CASE_VMSLT_OPCODES(LMUL_F8, MF8)
+ CASE_VMSLT_OPCODES(LMUL_F4, MF4)
+ CASE_VMSLT_OPCODES(LMUL_F2, MF2)
+ CASE_VMSLT_OPCODES(LMUL_1, M1)
+ CASE_VMSLT_OPCODES(LMUL_2, M2)
+ CASE_VMSLT_OPCODES(LMUL_4, M4)
+ CASE_VMSLT_OPCODES(LMUL_8, M8)
+#undef CASE_VMSLT_OPCODES
}
// Mask operations use the LMUL from the mask type.
switch (RISCVTargetLowering::getLMUL(VT)) {
default:
llvm_unreachable("Unexpected LMUL!");
- case RISCVII::VLMUL::LMUL_F8:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF8;
- break;
- case RISCVII::VLMUL::LMUL_F4:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF4;
- break;
- case RISCVII::VLMUL::LMUL_F2:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_MF2;
- break;
- case RISCVII::VLMUL::LMUL_1:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_M1;
- break;
- case RISCVII::VLMUL::LMUL_2:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_M2;
- break;
- case RISCVII::VLMUL::LMUL_4:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_M4;
- break;
- case RISCVII::VLMUL::LMUL_8:
- VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
- VMANDNOpcode = RISCV::PseudoVMANDN_MM_M8;
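+// Expands to a case that picks the vmxor.mm and vmandn.mm pseudos for one
+// LMUL.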
+#define CASE_VMXOR_VMANDN_OPCODES(lmulenum, suffix) \
+ case RISCVII::VLMUL::lmulenum: \
+ VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix; \
+ VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix; \
break;
+ CASE_VMXOR_VMANDN_OPCODES(LMUL_F8, MF8)
+ CASE_VMXOR_VMANDN_OPCODES(LMUL_F4, MF4)
+ CASE_VMXOR_VMANDN_OPCODES(LMUL_F2, MF2)
+ CASE_VMXOR_VMANDN_OPCODES(LMUL_1, M1)
+ CASE_VMXOR_VMANDN_OPCODES(LMUL_2, M2)
+ CASE_VMXOR_VMANDN_OPCODES(LMUL_4, M4)
+ CASE_VMXOR_VMANDN_OPCODES(LMUL_8, M8)
+#undef CASE_VMXOR_VMANDN_OPCODES
}
SDValue SEW = CurDAG->getTargetConstant(
Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);