From: Simon Pilgrim
Date: Fri, 29 Mar 2019 11:25:58 +0000 (+0000)
Subject: [X86] Add X86TargetLowering::isCommutativeBinOp override.
X-Git-Tag: llvmorg-10-init~8913
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=aeaf7fcddeefe733c87f69d88265f9e27540e5ec;p=platform%2Fupstream%2Fllvm.git

[X86] Add X86TargetLowering::isCommutativeBinOp override.

We currently just have test coverage for PMULUDQ - will add more in the future.

llvm-svn: 357244
---

diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 50210ad..a5691cd 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -28018,6 +28018,16 @@ bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
   return true;
 }
 
+bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
+  switch (Opcode) {
+  // TODO: Add more X86ISD opcodes once we have test coverage.
+  case X86ISD::PMULUDQ:
+    return true;
+  }
+
+  return TargetLoweringBase::isCommutativeBinOp(Opcode);
+}
+
 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
     return false;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index e1d54e2..0f7065b 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -957,6 +957,9 @@ namespace llvm {
 
     bool isVectorShiftByScalarCheap(Type *Ty) const override;
 
+    /// Returns true if the opcode is a commutative binary operation.
+    bool isCommutativeBinOp(unsigned Opcode) const override;
+
     /// Return true if it's free to truncate a value of
     /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
     /// register EAX to i16 by referencing its sub-register AX.
diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll
index 011ca12..1960e79 100644
--- a/llvm/test/CodeGen/X86/pmul.ll
+++ b/llvm/test/CodeGen/X86/pmul.ll
@@ -1374,9 +1374,9 @@ define <2 x i64> @pmuldq_square(<2 x i64> %x) {
 ; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
 ; SSE2-NEXT:    psrlq $32, %xmm0
 ; SSE2-NEXT:    pmuludq %xmm1, %xmm0
+; SSE2-NEXT:    pmuludq %xmm1, %xmm1
 ; SSE2-NEXT:    paddq %xmm0, %xmm0
 ; SSE2-NEXT:    psllq $32, %xmm0
-; SSE2-NEXT:    pmuludq %xmm1, %xmm1
 ; SSE2-NEXT:    paddq %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
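
For context: the patch teaches generic SelectionDAG code, which consults TargetLowering::isCommutativeBinOp when it wants to reorder or re-match operands, that the target-specific X86ISD::PMULUDQ node is commutative; anything not claimed in the switch still falls through to the base-class answer. The standalone C++ below is only a minimal sketch of that override pattern, not LLVM code - the Opcode enum, Node struct, and canonicalize() helper are hypothetical stand-ins for the real X86ISD opcodes and SelectionDAG machinery.

// Sketch (hypothetical types, not LLVM): a target override of a
// commutativity hook, plus a generic caller that uses it to put
// operands of commutative ops in a canonical order.
#include <cstdio>
#include <utility>

enum Opcode { ADD, SUB, PMULUDQ };

// Generic default: only knows about target-independent commutative ops.
struct TargetLoweringBase {
  virtual ~TargetLoweringBase() = default;
  virtual bool isCommutativeBinOp(unsigned Op) const {
    return Op == ADD; // SUB is not commutative; PMULUDQ is unknown here.
  }
};

// Target override, mirroring the shape of the patch: claim PMULUDQ as
// commutative and defer everything else to the base implementation.
struct X86LikeLowering : TargetLoweringBase {
  bool isCommutativeBinOp(unsigned Op) const override {
    switch (Op) {
    case PMULUDQ:
      return true;
    }
    return TargetLoweringBase::isCommutativeBinOp(Op);
  }
};

struct Node {
  unsigned Op;
  int LHS, RHS; // operand ids
};

// Hypothetical canonicalization: if the op is commutative, put the smaller
// operand id on the left so equivalent nodes compare (and CSE) identically.
void canonicalize(const TargetLoweringBase &TLI, Node &N) {
  if (TLI.isCommutativeBinOp(N.Op) && N.LHS > N.RHS)
    std::swap(N.LHS, N.RHS);
}

int main() {
  X86LikeLowering TLI;
  Node A{PMULUDQ, 7, 3}, B{SUB, 7, 3};
  canonicalize(TLI, A); // operands swapped: PMULUDQ is commutative
  canonicalize(TLI, B); // untouched: SUB is not
  std::printf("A = (%d, %d), B = (%d, %d)\n", A.LHS, A.RHS, B.LHS, B.RHS);
  return 0;
}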