}
break;
}
- case ISD::ADD:
case ISD::MUL:
+ // 'Quadratic Reciprocity': mul(x,x) -> 0 if we're only demanding bit[1],
+ // because x*x is congruent to 0 or 1 (mod 4), so bit[1] of a square is
+ // always zero (DemandedBits == 2 means only bit[1] is demanded).
+ if (DemandedBits == 2 && Op.getOperand(0) == Op.getOperand(1))
+ return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
+ LLVM_FALLTHROUGH;
+ case ISD::ADD:
case ISD::SUB: {
// Add, Sub, and Mul don't demand any bits in positions beyond that
// of the highest bit demanded of them.
define i64 @combine_mul_self_knownbits(i64 %x) {
; SSE-LABEL: combine_mul_self_knownbits:
; SSE: # %bb.0:
-; SSE-NEXT: movq %rdi, %rax
-; SSE-NEXT: imull %eax, %eax
-; SSE-NEXT: andl $2, %eax
+; SSE-NEXT: xorl %eax, %eax
; SSE-NEXT: retq
;
; AVX-LABEL: combine_mul_self_knownbits:
; AVX: # %bb.0:
-; AVX-NEXT: movq %rdi, %rax
-; AVX-NEXT: imull %eax, %eax
-; AVX-NEXT: andl $2, %eax
+; AVX-NEXT: xorl %eax, %eax
; AVX-NEXT: retq
%1 = mul i64 %x, %x
%2 = and i64 %1, 2
define <4 x i32> @combine_mul_self_knownbits_vector(<4 x i32> %x) {
; SSE-LABEL: combine_mul_self_knownbits_vector:
; SSE: # %bb.0:
-; SSE-NEXT: pmulld %xmm0, %xmm0
-; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_mul_self_knownbits_vector:
; AVX: # %bb.0:
-; AVX-NEXT: vpmulld %xmm0, %xmm0, %xmm0
-; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2,2,2,2]
-; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = mul <4 x i32> %x, %x
%2 = and <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>