--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+; atomicrmw add whose old value is compared (eq) against 0 - %1 — i.e. the
+; post-add value is zero. Expected codegen: `lock xaddl` yields the old value,
+; a plain `addl` re-forms the new value, and `sete` reads ZF from that add.
+define i1 @lock_add_sete(ptr %0, i32 %1) nounwind {
+; CHECK-LABEL: lock_add_sete:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: lock xaddl %eax, (%rdi)
+; CHECK-NEXT: addl %esi, %eax
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
+ %3 = atomicrmw add ptr %0, i32 %1 seq_cst, align 4
+ %4 = sub i32 0, %1
+ %5 = icmp eq i32 %3, %4
+ ret i1 %5
+}
+
+; atomicrmw add where the sign of the post-add value (old + %1 slt 0) is
+; returned. Expected codegen: `lock xaddl`, `addl` to re-form the new value,
+; then `shrl $31` to extract the sign bit.
+define i1 @lock_add_sets(ptr %0, i32 %1) nounwind {
+; CHECK-LABEL: lock_add_sets:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: lock xaddl %eax, (%rdi)
+; CHECK-NEXT: addl %esi, %eax
+; CHECK-NEXT: shrl $31, %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: retq
+ %3 = atomicrmw add ptr %0, i32 %1 seq_cst, align 4
+ %4 = add i32 %3, %1
+ %5 = icmp slt i32 %4, 0
+ ret i1 %5
+}
+
+; atomicrmw sub whose old value is compared (eq) directly against %1.
+; Expected codegen: the sub is implemented as `negl` + `lock xaddl`, and the
+; equality check becomes `cmpl %esi, %eax` + `sete` on the returned old value.
+define i1 @lock_sub_sete(ptr %0, i32 %1) nounwind {
+; CHECK-LABEL: lock_sub_sete:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: negl %eax
+; CHECK-NEXT: lock xaddl %eax, (%rdi)
+; CHECK-NEXT: cmpl %esi, %eax
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
+ %3 = atomicrmw sub ptr %0, i32 %1 seq_cst, align 4
+ %4 = icmp eq i32 %3, %1
+ ret i1 %4
+}
+
+; atomicrmw sub where the sign of the post-sub value (old - %1 slt 0) is
+; returned. Expected codegen: `negl` + `lock xaddl` for the atomic sub, then
+; `subl` to re-form the new value and `shrl $31` to extract the sign bit.
+define i1 @lock_sub_sets(ptr %0, i32 %1) nounwind {
+; CHECK-LABEL: lock_sub_sets:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %esi, %eax
+; CHECK-NEXT: negl %eax
+; CHECK-NEXT: lock xaddl %eax, (%rdi)
+; CHECK-NEXT: subl %esi, %eax
+; CHECK-NEXT: shrl $31, %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: retq
+ %3 = atomicrmw sub ptr %0, i32 %1 seq_cst, align 4
+ %4 = sub i32 %3, %1
+ %5 = icmp slt i32 %4, 0
+ ret i1 %5
+}
+
+; atomicrmw or followed by testing whether the post-or value (old | %1) is
+; zero. There is no xadd-like instruction for `or`, so the expected codegen
+; is a `lock cmpxchgl` loop; afterwards `orl` re-forms the new value and
+; `sete` reads ZF.
+define i1 @lock_or_sete(ptr %0, i32 %1) nounwind {
+; CHECK-LABEL: lock_or_sete:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB4_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: orl %esi, %ecx
+; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
+; CHECK-NEXT: jne .LBB4_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: orl %esi, %eax
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
+ %3 = atomicrmw or ptr %0, i32 %1 seq_cst, align 4
+ %4 = or i32 %3, %1
+ %5 = icmp eq i32 %4, 0
+ ret i1 %5
+}
+
+; atomicrmw or where the sign of the post-or value (old | %1 slt 0) is
+; returned. Expected codegen: `lock cmpxchgl` loop, then `orl` to re-form
+; the new value and `shrl $31` to extract the sign bit.
+define i1 @lock_or_sets(ptr %0, i32 %1) nounwind {
+; CHECK-LABEL: lock_or_sets:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB5_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: orl %esi, %ecx
+; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
+; CHECK-NEXT: jne .LBB5_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: orl %esi, %eax
+; CHECK-NEXT: shrl $31, %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: retq
+ %3 = atomicrmw or ptr %0, i32 %1 seq_cst, align 4
+ %4 = or i32 %3, %1
+ %5 = icmp slt i32 %4, 0
+ ret i1 %5
+}
+
+; atomicrmw and followed by testing whether the post-and value (old & %1) is
+; zero. Expected codegen: `lock cmpxchgl` loop, then the and+compare folds to
+; a single `testl %esi, %eax` feeding `sete`.
+define i1 @lock_and_sete(ptr %0, i32 %1) nounwind {
+; CHECK-LABEL: lock_and_sete:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB6_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: andl %esi, %ecx
+; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
+; CHECK-NEXT: jne .LBB6_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: testl %esi, %eax
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
+ %3 = atomicrmw and ptr %0, i32 %1 seq_cst, align 4
+ %4 = and i32 %3, %1
+ %5 = icmp eq i32 %4, 0
+ ret i1 %5
+}
+
+; atomicrmw and where the sign of the post-and value (old & %1 slt 0) is
+; returned. Expected codegen: `lock cmpxchgl` loop, then `andl` to re-form
+; the new value and `shrl $31` to extract the sign bit.
+define i1 @lock_and_sets(ptr %0, i32 %1) nounwind {
+; CHECK-LABEL: lock_and_sets:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB7_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: andl %esi, %ecx
+; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
+; CHECK-NEXT: jne .LBB7_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: andl %esi, %eax
+; CHECK-NEXT: shrl $31, %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: retq
+ %3 = atomicrmw and ptr %0, i32 %1 seq_cst, align 4
+ %4 = and i32 %3, %1
+ %5 = icmp slt i32 %4, 0
+ ret i1 %5
+}
+
+; atomicrmw xor whose old value is compared (eq) directly against %1
+; (equivalent to the post-xor value being zero). Expected codegen:
+; `lock cmpxchgl` loop, then `cmpl %esi, %eax` + `sete` on the old value.
+define i1 @lock_xor_sete(ptr %0, i32 %1) nounwind {
+; CHECK-LABEL: lock_xor_sete:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB8_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: xorl %esi, %ecx
+; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
+; CHECK-NEXT: jne .LBB8_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: cmpl %esi, %eax
+; CHECK-NEXT: sete %al
+; CHECK-NEXT: retq
+ %3 = atomicrmw xor ptr %0, i32 %1 seq_cst, align 4
+ %4 = icmp eq i32 %3, %1
+ ret i1 %4
+}
+
+; atomicrmw xor where the sign of the post-xor value (old ^ %1 slt 0) is
+; returned. Expected codegen: `lock cmpxchgl` loop, then `xorl` to re-form
+; the new value and `shrl $31` to extract the sign bit.
+define i1 @lock_xor_sets(ptr %0, i32 %1) nounwind {
+; CHECK-LABEL: lock_xor_sets:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl (%rdi), %eax
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB9_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: movl %eax, %ecx
+; CHECK-NEXT: xorl %esi, %ecx
+; CHECK-NEXT: lock cmpxchgl %ecx, (%rdi)
+; CHECK-NEXT: jne .LBB9_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: xorl %esi, %eax
+; CHECK-NEXT: shrl $31, %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: retq
+ %3 = atomicrmw xor ptr %0, i32 %1 seq_cst, align 4
+ %4 = xor i32 %3, %1
+ %5 = icmp slt i32 %4, 0
+ ret i1 %5
+}