case Intrinsic::x86_axor32:
case Intrinsic::x86_axor64:
case Intrinsic::x86_atomic_add_cc:
- case Intrinsic::x86_atomic_sub_cc: {
+ case Intrinsic::x86_atomic_sub_cc:
+ case Intrinsic::x86_atomic_or_cc:
+ case Intrinsic::x86_atomic_and_cc:
+ case Intrinsic::x86_atomic_xor_cc: {
Info.opc = ISD::INTRINSIC_W_CHAIN;
Info.ptrVal = I.getArgOperand(0);
unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
{Chain, Op1, Op2}, VT, MMO);
}
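
The three new intrinsics are assumed here to share the shape of `x86_atomic_add_cc`/`x86_atomic_sub_cc`: a pointer, an overloaded integer operand, and an immediate condition code, returning the flag as a byte. A sketch of the i32 overloads under that assumption (the authoritative definitions live in IntrinsicsX86.td):

declare i8 @llvm.x86.atomic.or.cc.i32(ptr, i32, i32 immarg)
declare i8 @llvm.x86.atomic.and.cc.i32(ptr, i32, i32 immarg)
declare i8 @llvm.x86.atomic.xor.cc.i32(ptr, i32, i32 immarg)
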
case Intrinsic::x86_atomic_add_cc:
- case Intrinsic::x86_atomic_sub_cc: {
+ case Intrinsic::x86_atomic_sub_cc:
+ case Intrinsic::x86_atomic_or_cc:
+ case Intrinsic::x86_atomic_and_cc:
+ case Intrinsic::x86_atomic_xor_cc: {
SDLoc DL(Op);
SDValue Chain = Op.getOperand(0);
SDValue Op1 = Op.getOperand(2);
case Intrinsic::x86_atomic_sub_cc:
Opc = X86ISD::LSUB;
break;
+ case Intrinsic::x86_atomic_or_cc:
+ Opc = X86ISD::LOR;
+ break;
+ case Intrinsic::x86_atomic_and_cc:
+ Opc = X86ISD::LAND;
+ break;
+ case Intrinsic::x86_atomic_xor_cc:
+ Opc = X86ISD::LXOR;
+ break;
}
MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
SDValue LockArith =
return Pred == CmpInst::ICMP_SLT;
return false;
}
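+ // `lock or`/`lock and` set ZF and SF from the value they store back, so a
+ // compare of (old | v) or (old & v) against zero (eq or slt) can read the
+ // flags of the locked instruction directly.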
+ if (Opc == AtomicRMWInst::Or) {
+ if (match(I, m_OneUse(m_c_Or(m_Specific(Op), m_Value()))) &&
+ match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
+ return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLT;
+ }
+ if (Opc == AtomicRMWInst::And) {
+ if (match(I, m_OneUse(m_c_And(m_Specific(Op), m_Value()))) &&
+ match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
+ return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLT;
+ }
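+ // xor admits one extra form: old == v is the same as (old ^ v) == 0, which
+ // is exactly the ZF produced by `lock xor`.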
+ if (Opc == AtomicRMWInst::Xor) {
+ if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
+ return Pred == CmpInst::ICMP_EQ;
+ if (match(I, m_OneUse(m_c_Xor(m_Specific(Op), m_Value()))) &&
+ match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
+ return Pred == CmpInst::ICMP_SLT;
+ }
return false;
}
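
Concretely, the new cases accept a single-use atomicrmw whose recomputed result feeds a compare against zero, the exact shape exercised by the tests below (names illustrative):

%old = atomicrmw or ptr %p, i32 %v seq_cst, align 4
%res = or i32 %old, %v       ; recomputes the value stored by the atomicrmw
%sf = icmp slt i32 %res, 0   ; answered by SF of `lock orl`
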
case AtomicRMWInst::Sub:
IID = Intrinsic::x86_atomic_sub_cc;
break;
+ case AtomicRMWInst::Or:
+ IID = Intrinsic::x86_atomic_or_cc;
+ break;
+ case AtomicRMWInst::And:
+ IID = Intrinsic::x86_atomic_and_cc;
+ break;
+ case AtomicRMWInst::Xor:
+ IID = Intrinsic::x86_atomic_xor_cc;
+ break;
}
Function *CmpArith =
Intrinsic::getDeclaration(AI->getModule(), IID, AI->getType());
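
Assuming the emission path around `Intrinsic::getDeclaration` mirrors the existing add/sub handling (call the declared intrinsic with the pointer, the value, and the icmp predicate as an immediate, then truncate the returned flag byte for the original i1 compare), the pattern sketched earlier would expand to roughly:

%flag = call i8 @llvm.x86.atomic.or.cc.i32(ptr %p, i32 %v, i32 40) ; 40 = ICMP_SLT (assumed encoding)
%sf = trunc i8 %flag to i1

This is a sketch under those assumptions, not verbatim compiler output.
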
case AtomicRMWInst::Or:
case AtomicRMWInst::And:
case AtomicRMWInst::Xor:
+ if (shouldExpandCmpArithRMWInIR(AI))
+ return AtomicExpansionKind::CmpArithIntrinsic;
return shouldExpandLogicAtomicRMWInIR(AI);
case AtomicRMWInst::Nand:
case AtomicRMWInst::Max:
define i1 @lock_or_sete(ptr %0, i32 %1) nounwind {
; CHECK-LABEL: lock_or_sete:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl (%rdi), %eax
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB4_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: orl %esi, %ecx
-; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
-; CHECK-NEXT: jne .LBB4_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: orl %esi, %eax
+; CHECK-NEXT: lock orl %esi, (%rdi)
; CHECK-NEXT: sete %al
; CHECK-NEXT: retq
%3 = atomicrmw or ptr %0, i32 %1 seq_cst, align 4
%4 = or i32 %3, %1
%5 = icmp eq i32 %4, 0
ret i1 %5
}
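
ZF of `lock orl` is set exactly when the stored result `%3 | %1` is zero, so the cmpxchg loop and the trailing `orl` both fold away; the `sets` variants below read SF the same way.
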
define i1 @lock_or_sets(ptr %0, i32 %1) nounwind {
; CHECK-LABEL: lock_or_sets:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl (%rdi), %eax
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB5_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: orl %esi, %ecx
-; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
-; CHECK-NEXT: jne .LBB5_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: orl %esi, %eax
-; CHECK-NEXT: shrl $31, %eax
-; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: lock orl %esi, (%rdi)
+; CHECK-NEXT: sets %al
; CHECK-NEXT: retq
%3 = atomicrmw or ptr %0, i32 %1 seq_cst, align 4
%4 = or i32 %3, %1
%5 = icmp slt i32 %4, 0
ret i1 %5
}
define i1 @lock_and_sete(ptr %0, i32 %1) nounwind {
; CHECK-LABEL: lock_and_sete:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl (%rdi), %eax
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB6_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: andl %esi, %ecx
-; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
-; CHECK-NEXT: jne .LBB6_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: testl %esi, %eax
+; CHECK-NEXT: lock andl %esi, (%rdi)
; CHECK-NEXT: sete %al
; CHECK-NEXT: retq
%3 = atomicrmw and ptr %0, i32 %1 seq_cst, align 4
%4 = and i32 %3, %1
%5 = icmp eq i32 %4, 0
ret i1 %5
}
define i1 @lock_and_sets(ptr %0, i32 %1) nounwind {
; CHECK-LABEL: lock_and_sets:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl (%rdi), %eax
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB7_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: andl %esi, %ecx
-; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
-; CHECK-NEXT: jne .LBB7_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: andl %esi, %eax
-; CHECK-NEXT: shrl $31, %eax
-; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: lock andl %esi, (%rdi)
+; CHECK-NEXT: sets %al
; CHECK-NEXT: retq
%3 = atomicrmw and ptr %0, i32 %1 seq_cst, align 4
%4 = and i32 %3, %1
%5 = icmp slt i32 %4, 0
ret i1 %5
}
define i1 @lock_xor_sete(ptr %0, i32 %1) nounwind {
; CHECK-LABEL: lock_xor_sete:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl (%rdi), %eax
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB8_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: xorl %esi, %ecx
-; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
-; CHECK-NEXT: jne .LBB8_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: cmpl %esi, %eax
+; CHECK-NEXT: lock xorl %esi, (%rdi)
; CHECK-NEXT: sete %al
; CHECK-NEXT: retq
%3 = atomicrmw xor ptr %0, i32 %1 seq_cst, align 4
%4 = icmp eq i32 %3, %1
ret i1 %4
}
define i1 @lock_xor_sets(ptr %0, i32 %1) nounwind {
; CHECK-LABEL: lock_xor_sets:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl (%rdi), %eax
-; CHECK-NEXT: .p2align 4, 0x90
-; CHECK-NEXT: .LBB9_1: # %atomicrmw.start
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: movl %eax, %ecx
-; CHECK-NEXT: xorl %esi, %ecx
-; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
-; CHECK-NEXT: jne .LBB9_1
-; CHECK-NEXT: # %bb.2: # %atomicrmw.end
-; CHECK-NEXT: xorl %esi, %eax
-; CHECK-NEXT: shrl $31, %eax
-; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: lock xorl %esi, (%rdi)
+; CHECK-NEXT: sets %al
; CHECK-NEXT: retq
%3 = atomicrmw xor ptr %0, i32 %1 seq_cst, align 4
%4 = xor i32 %3, %1
%5 = icmp slt i32 %4, 0
ret i1 %5
}