return;
}
case ISD::SRL: {
- // Optimize (srl (and X, C2), C) ->
- // (srli (slli X, (XLen-C3), (XLen-C3) + C)
- // Where C2 is a mask with C3 trailing ones.
- // Taking into account that the C2 may have had lower bits unset by
- // SimplifyDemandedBits. This avoids materializing the C2 immediate.
- // This pattern occurs when type legalizing right shifts for types with
- // less than XLen bits.
auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
if (!N1C)
break;
SDValue N0 = Node->getOperand(0);
if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
    !isa<ConstantSDNode>(N0.getOperand(1)))
  break;
unsigned ShAmt = N1C->getZExtValue();
uint64_t Mask = N0.getConstantOperandVal(1);
+
+ // Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C) where C2 has
+ // 32 leading zeros and C3 trailing zeros.
+ if (isShiftedMask_64(Mask)) {
+ unsigned XLen = Subtarget->getXLen();
+ unsigned LeadingZeros = XLen - (64 - countLeadingZeros(Mask));
+ unsigned TrailingZeros = countTrailingZeros(Mask);
+ if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
+ SDNode *SRLIW = CurDAG->getMachineNode(
+ RISCV::SRLIW, DL, VT, N0->getOperand(0),
+ CurDAG->getTargetConstant(TrailingZeros, DL, VT));
+ SDNode *SLLI = CurDAG->getMachineNode(
+ RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
+ CurDAG->getTargetConstant(TrailingZeros - ShAmt, DL, VT));
+ ReplaceNode(Node, SLLI);
+ return;
+ }
+ }
+
+ // Optimize (srl (and X, C2), C) ->
+ // (srli (slli X, (XLen-C3)), (XLen-C3) + C)
+ // Where C2 is a mask with C3 trailing ones.
+ // Taking into account that the C2 may have had lower bits unset by
+ // SimplifyDemandedBits. This avoids materializing the C2 immediate.
+ // This pattern occurs when type legalizing right shifts for types with
+ // less than XLen bits.
Mask |= maskTrailingOnes<uint64_t>(ShAmt);
if (!isMask_64(Mask))
break;
;
; RV64I-LABEL: fold_demote_h_s:
; RV64I: # %bb.0:
-; RV64I-NEXT: li a2, 1
-; RV64I-NEXT: slli a2, a2, 31
-; RV64I-NEXT: and a1, a1, a2
-; RV64I-NEXT: srli a1, a1, 16
+; RV64I-NEXT: srliw a1, a1, 31
+; RV64I-NEXT: slli a1, a1, 15
; RV64I-NEXT: slli a0, a0, 49
; RV64I-NEXT: srli a0, a0, 49
; RV64I-NEXT: or a0, a0, a1
;
; RV64-LABEL: neg_sel_special_constant:
; RV64: # %bb.0:
-; RV64-NEXT: li a1, 1
-; RV64-NEXT: slli a1, a1, 31
-; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: srli a0, a0, 22
+; RV64-NEXT: srliw a0, a0, 31
+; RV64-NEXT: slli a0, a0, 9
; RV64-NEXT: ret
%tmp.1 = icmp slt i32 %a, 0
%retval = select i1 %tmp.1, i32 512, i32 0