We currently only have test coverage for PMULUDQ; more X86ISD opcodes will be added in the future.
llvm-svn: 357244
return true;
}
+// Target override: report X86-specific DAG opcodes whose two operands may be
+// freely swapped, so generic DAG combines can commute them. Unlisted opcodes
+// defer to the target-independent TargetLoweringBase implementation.
+bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
+ switch (Opcode) {
+ // TODO: Add more X86ISD opcodes once we have test coverage.
+ case X86ISD::PMULUDQ:
+ return true;
+ }
+
+ return TargetLoweringBase::isCommutativeBinOp(Opcode);
+}
+
bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
return false;
bool isVectorShiftByScalarCheap(Type *Ty) const override;
+ /// Returns true if the opcode is a commutative binary operation.
+ bool isCommutativeBinOp(unsigned Opcode) const override;
+
/// Return true if it's free to truncate a value of
/// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
/// register EAX to i16 by referencing its sub-register AX.
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: psrlq $32, %xmm0
; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: pmuludq %xmm1, %xmm1
; SSE2-NEXT: paddq %xmm0, %xmm0
; SSE2-NEXT: psllq $32, %xmm0
-; SSE2-NEXT: pmuludq %xmm1, %xmm1
; SSE2-NEXT: paddq %xmm1, %xmm0
; SSE2-NEXT: retq
;