setTargetDAGCombine(ISD::ROTL);
setTargetDAGCombine(ISD::ROTR);
}
+ if (Subtarget.hasStdExtZbkb())
+ setTargetDAGCombine(ISD::BITREVERSE);
setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb())
setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
}
case RISCVISD::GREV:
case RISCVISD::GORC: {
- assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+ MVT VT = N->getSimpleValueType(0);
+ MVT XLenVT = Subtarget.getXLenVT();
+ assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) &&
"Unexpected custom legalisation");
assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
- SDValue NewOp0 =
- DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
+ assert((Subtarget.hasStdExtZbp() ||
+ (Subtarget.hasStdExtZbkb() && N->getOpcode() == RISCVISD::GREV &&
+ N->getConstantOperandVal(1) == 7)) &&
+ "Unexpected extension");
+ SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
SDValue NewOp1 =
- DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
- SDValue NewRes = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
+ DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, N->getOperand(1));
+ SDValue NewRes = DAG.getNode(N->getOpcode(), DL, XLenVT, NewOp0, NewOp1);
// ReplaceNodeResults requires we maintain the same type for the return
// value.
- Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
+ Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes));
break;
}
case RISCVISD::SHFL: {
return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
}
+// Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is
+// smaller than XLenVT.
+static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG,
+ const RISCVSubtarget &Subtarget) {
+ // This combine is only registered when Zbkb is present (BREV8 is a Zbkb
+ // instruction), so anything else reaching here is a setup bug.
+ assert(Subtarget.hasStdExtZbkb() && "Unexpected extension");
+
+ SDValue Src = N->getOperand(0);
+ // The fold only applies to the combined (bitreverse (bswap X)) pattern;
+ // bail out for a lone bitreverse.
+ if (Src.getOpcode() != ISD::BSWAP)
+ return SDValue();
+
+ EVT VT = N->getValueType(0);
+ // Restrict to power-of-2 scalar integer types strictly narrower than XLen.
+ // NOTE(review): XLen-wide bitreverse+bswap is presumably matched directly
+ // during instruction selection rather than here -- confirm.
+ if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() ||
+ !isPowerOf2_32(VT.getSizeInBits()))
+ return SDValue();
+
+ SDLoc DL(N);
+ // bitreverse(bswap X) reverses the bits within each byte of X, i.e. GREV
+ // with shift amount 7 -- the BREV8 encoding (see the brev8 checks in the
+ // updated tests below).
+ return DAG.getNode(RISCVISD::GREV, DL, VT, Src.getOperand(0),
+ DAG.getConstant(7, DL, VT));
+}
+
SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
}
break;
}
+ case ISD::BITREVERSE:
+ return performBITREVERSECombine(N, DAG, Subtarget);
case ISD::FP_TO_SINT:
case ISD::FP_TO_UINT:
return performFP_TO_INTCombine(N, DCI, Subtarget);
ret i64 %tmp
}
-; FIXME: Merge the away the two rev8s in the Zbkb code.
define i16 @test_bswap_bitreverse_i16(i16 %a) nounwind {
; RV32I-LABEL: test_bswap_bitreverse_i16:
; RV32I: # %bb.0:
;
; RV32ZBKB-LABEL: test_bswap_bitreverse_i16:
; RV32ZBKB: # %bb.0:
-; RV32ZBKB-NEXT: rev8 a0, a0
-; RV32ZBKB-NEXT: srli a0, a0, 16
-; RV32ZBKB-NEXT: rev8 a0, a0
; RV32ZBKB-NEXT: brev8 a0, a0
-; RV32ZBKB-NEXT: srli a0, a0, 16
; RV32ZBKB-NEXT: ret
;
; RV64ZBKB-LABEL: test_bswap_bitreverse_i16:
; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: rev8 a0, a0
-; RV64ZBKB-NEXT: srli a0, a0, 48
-; RV64ZBKB-NEXT: rev8 a0, a0
; RV64ZBKB-NEXT: brev8 a0, a0
-; RV64ZBKB-NEXT: srli a0, a0, 48
; RV64ZBKB-NEXT: ret
;
; RV32ZBP-LABEL: test_bswap_bitreverse_i16:
ret i16 %tmp2
}
-; FIXME: Merge the away the two rev8s in the Zbkb code.
define i32 @test_bswap_bitreverse_i32(i32 %a) nounwind {
; RV32I-LABEL: test_bswap_bitreverse_i32:
; RV32I: # %bb.0:
;
; RV64ZBKB-LABEL: test_bswap_bitreverse_i32:
; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: rev8 a0, a0
-; RV64ZBKB-NEXT: srli a0, a0, 32
-; RV64ZBKB-NEXT: rev8 a0, a0
; RV64ZBKB-NEXT: brev8 a0, a0
-; RV64ZBKB-NEXT: srli a0, a0, 32
; RV64ZBKB-NEXT: ret
;
; RV32ZBP-LABEL: test_bswap_bitreverse_i32:
ret i64 %tmp2
}
-; FIXME: Merge the away the two rev8s in the Zbkb code.
define i16 @test_bitreverse_bswap_i16(i16 %a) nounwind {
; RV32I-LABEL: test_bitreverse_bswap_i16:
; RV32I: # %bb.0:
;
; RV32ZBKB-LABEL: test_bitreverse_bswap_i16:
; RV32ZBKB: # %bb.0:
-; RV32ZBKB-NEXT: rev8 a0, a0
-; RV32ZBKB-NEXT: srli a0, a0, 16
-; RV32ZBKB-NEXT: rev8 a0, a0
; RV32ZBKB-NEXT: brev8 a0, a0
-; RV32ZBKB-NEXT: srli a0, a0, 16
; RV32ZBKB-NEXT: ret
;
; RV64ZBKB-LABEL: test_bitreverse_bswap_i16:
; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: rev8 a0, a0
-; RV64ZBKB-NEXT: srli a0, a0, 48
-; RV64ZBKB-NEXT: rev8 a0, a0
; RV64ZBKB-NEXT: brev8 a0, a0
-; RV64ZBKB-NEXT: srli a0, a0, 48
; RV64ZBKB-NEXT: ret
;
; RV32ZBP-LABEL: test_bitreverse_bswap_i16:
ret i16 %tmp2
}
-; FIXME: Merge the away the two rev8s in the Zbkb code.
define i32 @test_bitreverse_bswap_i32(i32 %a) nounwind {
; RV32I-LABEL: test_bitreverse_bswap_i32:
; RV32I: # %bb.0:
;
; RV64ZBKB-LABEL: test_bitreverse_bswap_i32:
; RV64ZBKB: # %bb.0:
-; RV64ZBKB-NEXT: rev8 a0, a0
-; RV64ZBKB-NEXT: srli a0, a0, 32
-; RV64ZBKB-NEXT: rev8 a0, a0
; RV64ZBKB-NEXT: brev8 a0, a0
-; RV64ZBKB-NEXT: srli a0, a0, 32
; RV64ZBKB-NEXT: ret
;
; RV32ZBP-LABEL: test_bitreverse_bswap_i32: