  return nullptr;
}
+// Return true if Left + Right constant folds to a scalar constant or a
+// constant splat vector equal to Sum.
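+// The fold is element-wise, so Left and Right may themselves be non-uniform
+// constant vectors, as long as every per-element sum equals Sum.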
+static bool sumMatchConstant(SDValue Left, SDValue Right, unsigned Sum,
+                             SelectionDAG &DAG, const SDLoc &DL) {
+  EVT ShiftVT = Left.getValueType();
+  if (ShiftVT != Right.getValueType())
+    return false;
+
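+  // FoldConstantArithmetic returns a null SDValue unless both operands fold
+  // to constants (including constant build vectors).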
+  SDValue ShiftSum = DAG.FoldConstantArithmetic(ISD::ADD, DL, ShiftVT,
+                                                Left.getNode(), Right.getNode());
+  if (!ShiftSum)
+    return false;
+
+  ConstantSDNode *CSum = isConstOrConstSplat(ShiftSum);
+  return CSum && CSum->getZExtValue() == Sum;
+}
+
// MatchRotate - Handle an 'or' of two operands. If this is one of the many
// idioms for rotate, and if the target supports rotation instructions, generate
// a rot[lr].
  // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
  // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
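+  // The helper above also admits non-uniform constant vector shift amounts,
+  // e.g. for v4i32:
+  //   (or (shl x, <4,5,6,7>), (srl x, <28,27,26,25>)) -> (rotl x, <4,5,6,7>)
+  // since each pair of amounts sums to 32.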
-  if (isConstOrConstSplat(LHSShiftAmt) && isConstOrConstSplat(RHSShiftAmt)) {
-    uint64_t LShVal = isConstOrConstSplat(LHSShiftAmt)->getZExtValue();
-    uint64_t RShVal = isConstOrConstSplat(RHSShiftAmt)->getZExtValue();
-    if ((LShVal + RShVal) != EltSizeInBits)
-      return nullptr;
-
+  if (sumMatchConstant(LHSShiftAmt, RHSShiftAmt, EltSizeInBits, DAG, DL)) {
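+    // The shift amounts sum to the element width, so a rotl by the shl amount
+    // equals a rotr by the srl amount; use whichever rotate the target
+    // supports.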
    SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
                              LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt);
    // If there is an AND of either shifted operand, apply it to the result.
    if (LHSMask.getNode() || RHSMask.getNode()) {
-      SDValue Mask = DAG.getAllOnesConstant(DL, VT);
+      SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
+      SDValue Mask = AllOnes;
      if (LHSMask.getNode()) {
-        APInt RHSBits = APInt::getLowBitsSet(EltSizeInBits, LShVal);
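+        // Build the low-bits mask as (all-ones >> srl-amount); unlike the
+        // old APInt::getLowBitsSet mask, this also handles non-uniform
+        // vector shift amounts.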
+        SDValue RHSBits = DAG.getNode(ISD::SRL, DL, VT, AllOnes, RHSShiftAmt);
        Mask = DAG.getNode(ISD::AND, DL, VT, Mask,
-                          DAG.getNode(ISD::OR, DL, VT, LHSMask,
-                                      DAG.getConstant(RHSBits, DL, VT)));
+                          DAG.getNode(ISD::OR, DL, VT, LHSMask, RHSBits));
      }
      if (RHSMask.getNode()) {
-        APInt LHSBits = APInt::getHighBitsSet(EltSizeInBits, RShVal);
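+        // Likewise, (all-ones << shl-amount) rebuilds the old
+        // APInt::getHighBitsSet mask while supporting non-uniform vector
+        // shift amounts.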
+        SDValue LHSBits = DAG.getNode(ISD::SHL, DL, VT, AllOnes, LHSShiftAmt);
        Mask = DAG.getNode(ISD::AND, DL, VT, Mask,
-                          DAG.getNode(ISD::OR, DL, VT, RHSMask,
-                                      DAG.getConstant(LHSBits, DL, VT)));
+                          DAG.getNode(ISD::OR, DL, VT, RHSMask, LHSBits));
      }
      Rot = DAG.getNode(ISD::AND, DL, VT, Rot, Mask);
; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
;
-; XOPAVX1-LABEL: constant_rotate_v2i64:
-; XOPAVX1: # BB#0:
-; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm2, %xmm2
-; XOPAVX1-NEXT: vpshlq %xmm2, %xmm0, %xmm0
-; XOPAVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
-; XOPAVX1-NEXT: retq
-;
-; XOPAVX2-LABEL: constant_rotate_v2i64:
-; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
-; XOPAVX2-NEXT: retq
+; XOP-LABEL: constant_rotate_v2i64:
+; XOP: # BB#0:
+; XOP-NEXT: vprotq {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v2i64:
; X32-SSE: # BB#0:
; AVX512-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
;
-; XOPAVX1-LABEL: constant_rotate_v4i32:
-; XOPAVX1: # BB#0:
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
-; XOPAVX1-NEXT: retq
-;
-; XOPAVX2-LABEL: constant_rotate_v4i32:
-; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX2-NEXT: vpor %xmm0, %xmm1, %xmm0
-; XOPAVX2-NEXT: retq
+; XOP-LABEL: constant_rotate_v4i32:
+; XOP: # BB#0:
+; XOP-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v4i32:
; X32-SSE: # BB#0:
;
; XOP-LABEL: constant_rotate_v8i16:
; XOP: # BB#0:
-; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm1
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm2, %xmm2
-; XOP-NEXT: vpshlw %xmm2, %xmm0, %xmm0
-; XOP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v8i16:
;
; XOP-LABEL: constant_rotate_v16i8:
; XOP: # BB#0:
-; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm1
-; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm2, %xmm2
-; XOP-NEXT: vpshlb %xmm2, %xmm0, %xmm0
-; XOP-NEXT: vpor %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vprotb {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; X32-SSE-LABEL: constant_rotate_v16i8:
;
; XOPAVX1-LABEL: constant_rotate_v8i32:
; XOPAVX1: # BB#0:
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm2, %xmm3
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
-; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm2, %xmm2
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_rotate_v8i32:
; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm1
-; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
-; XOPAVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT: vprotd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX2-NEXT: retq
%shl = shl <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
%lshr = lshr <8 x i32> %a, <i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21>
;
; XOPAVX1-LABEL: constant_rotate_v16i16:
; XOPAVX1: # BB#0:
-; XOPAVX1-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm1
-; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; XOPAVX1-NEXT: vpshlw {{.*}}(%rip), %xmm2, %xmm3
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm3, %xmm4
-; XOPAVX1-NEXT: vpshlw %xmm4, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm3, %xmm3
-; XOPAVX1-NEXT: vpshlw %xmm3, %xmm0, %xmm0
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_rotate_v16i16:
; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm1
-; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm2, %xmm3
-; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
-; XOPAVX2-NEXT: vpshlw %xmm3, %xmm4, %xmm3
-; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm2, %xmm2
-; XOPAVX2-NEXT: vpshlw %xmm2, %xmm0, %xmm0
-; XOPAVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0
-; XOPAVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
+; XOPAVX2-NEXT: vprotw {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; XOPAVX2-NEXT: retq
%shl = shl <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
%lshr = lshr <16 x i16> %a, <i16 16, i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8, i16 7, i16 6, i16 5, i16 4, i16 3, i16 2, i16 1>
;
; XOPAVX1-LABEL: constant_rotate_v32i8:
; XOPAVX1: # BB#0:
-; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
-; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; XOPAVX1-NEXT: vpshlb %xmm1, %xmm2, %xmm3
-; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm1
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm3, %xmm3
-; XOPAVX1-NEXT: vpshlb %xmm3, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpshlb %xmm3, %xmm0, %xmm0
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX1-NEXT: vorps %ymm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
+; XOPAVX1-NEXT: vprotb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT: vprotb %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: constant_rotate_v32i8:
; XOPAVX2: # BB#0:
-; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
-; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; XOPAVX2-NEXT: vpshlb %xmm1, %xmm2, %xmm3
-; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm1
-; XOPAVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm1
-; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; XOPAVX2-NEXT: vpsubb {{.*}}(%rip), %xmm3, %xmm3
-; XOPAVX2-NEXT: vpshlb %xmm3, %xmm2, %xmm2
-; XOPAVX2-NEXT: vpshlb %xmm3, %xmm0, %xmm0
-; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
-; XOPAVX2-NEXT: vpor %ymm0, %ymm1, %ymm0
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
+; XOPAVX2-NEXT: vprotb %xmm2, %xmm1, %xmm1
+; XOPAVX2-NEXT: vprotb %xmm2, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%shl = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>
%lshr = lshr <32 x i8> %a, <i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>