    // V0's elements are all poison or not. (e.g., add_with_overflow)
    const WithOverflowInst *II;
    if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
-       match(ValAssumedPoison, m_ExtractValue(m_Specific(II))))
+       (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
+        llvm::is_contained(II->arg_operands(), ValAssumedPoison)))
      return true;
  }
  return false;
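
The added is_contained clause extends the implication to the call's own arguments: if ValAssumedPoison is one of the with.overflow operands, poison in it poisons the whole result aggregate and hence the extracted overflow bit. Together with impliesPoison's recursion over operands, that connects an icmp on such an argument to the overflow bit. A minimal IR sketch of the kind of fold this unlocks (the function name and comments are illustrative, not taken verbatim from the patch):

define i1 @poison_implication_sketch(i4 %size, i4 %nmemb) {
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
  %smul.ov = extractvalue { i4, i1 } %smul, 1
  %cmp = icmp ne i4 %size, 0
  ; Poison-safe logical 'and'. If %cmp is poison then %size is poison,
  ; so %smul.ov is poison as well; the select can therefore be relaxed
  ; to a plain 'and' without creating poison that was not already there.
  %and = select i1 %smul.ov, i1 %cmp, i1 false
  ret i1 %and
}

declare { i4, i1 } @llvm.smul.with.overflow.i4(i4, i4)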
define i1 @t1_commutative(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @t1_commutative(
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
-; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE:%.*]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[SMUL_OV]], true
-; CHECK-NEXT: [[OR:%.*]] = select i1 [[PHITMP]], i1 true, i1 [[CMP]]
-; CHECK-NEXT: ret i1 [[OR]]
+; CHECK-NEXT: ret i1 [[PHITMP]]
;
  %cmp = icmp eq i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
define i1 @t0_smul(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @t0_smul(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
-; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE:%.*]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
-; CHECK-NEXT: [[AND:%.*]] = select i1 [[SMUL_OV]], i1 [[CMP]], i1 false
-; CHECK-NEXT: ret i1 [[AND]]
+; CHECK-NEXT: ret i1 [[SMUL_OV]]
;
  %cmp = icmp ne i4 %size, 0
  %smul = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 %size, i4 %nmemb)
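
In @t0_smul the whole check collapses to the overflow bit: once the poison-safe select is relaxed to a plain 'and', the comparison can be dropped, since a signed multiply with a zero operand can never overflow, so a set %smul.ov already implies %size != 0. (Presumably that second fold predates this patch; the relaxation is what lets it fire.)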
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
-; CHECK-NEXT: [[AND:%.*]] = select i1 [[SMUL_OV]], i1 [[CMP]], i1 false
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp eq i4 %size, 0 ; not 'ne'
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
-; CHECK-NEXT: [[AND:%.*]] = select i1 [[SMUL_OV]], i1 true, i1 [[CMP]]
+; CHECK-NEXT: [[AND:%.*]] = or i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 1
; CHECK-NEXT: [[SMUL:%.*]] = tail call { i4, i1 } @llvm.smul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[SMUL_OV:%.*]] = extractvalue { i4, i1 } [[SMUL]], 1
-; CHECK-NEXT: [[AND:%.*]] = select i1 [[SMUL_OV]], i1 [[CMP]], i1 false
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[SMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 1 ; should be '0'
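
Where the comparison survives in the hunks above (wrong predicate, wrong constant, or a form the stronger fold does not handle), the select is still relaxed to a bitwise 'and'/'or': the poison implication only needs %size to feed the with.overflow call, not any particular predicate or constant.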
define i1 @t1_commutative(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @t1_commutative(
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
-; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE:%.*]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
; CHECK-NEXT: [[PHITMP:%.*]] = xor i1 [[UMUL_OV]], true
-; CHECK-NEXT: [[OR:%.*]] = select i1 [[PHITMP]], i1 true, i1 [[CMP]]
-; CHECK-NEXT: ret i1 [[OR]]
+; CHECK-NEXT: ret i1 [[PHITMP]]
;
  %cmp = icmp eq i4 %size, 0
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
define i1 @t0_umul(i4 %size, i4 %nmemb) {
; CHECK-LABEL: @t0_umul(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
-; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
+; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE:%.*]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
-; CHECK-NEXT: [[AND:%.*]] = select i1 [[UMUL_OV]], i1 [[CMP]], i1 false
-; CHECK-NEXT: ret i1 [[AND]]
+; CHECK-NEXT: ret i1 [[UMUL_OV]]
;
  %cmp = icmp ne i4 %size, 0
  %umul = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 %size, i4 %nmemb)
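
The unsigned variants behave identically: a multiply by zero cannot overflow in the unsigned case either, so @t0_umul and its commutative twin likewise reduce to the extracted overflow bit.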
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i4 [[SIZE:%.*]], 0
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
-; CHECK-NEXT: [[AND:%.*]] = select i1 [[UMUL_OV]], i1 [[CMP]], i1 false
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[UMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp eq i4 %size, 0 ; not 'ne'
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 0
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
-; CHECK-NEXT: [[AND:%.*]] = select i1 [[UMUL_OV]], i1 true, i1 [[CMP]]
+; CHECK-NEXT: [[AND:%.*]] = or i1 [[UMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 0
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i4 [[SIZE:%.*]], 1
; CHECK-NEXT: [[UMUL:%.*]] = tail call { i4, i1 } @llvm.umul.with.overflow.i4(i4 [[SIZE]], i4 [[NMEMB:%.*]])
; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i4, i1 } [[UMUL]], 1
-; CHECK-NEXT: [[AND:%.*]] = select i1 [[UMUL_OV]], i1 [[CMP]], i1 false
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[UMUL_OV]], [[CMP]]
; CHECK-NEXT: ret i1 [[AND]]
;
  %cmp = icmp ne i4 %size, 1 ; should be '0'
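
All of these CHECK lines are in the style produced by llvm/utils/update_test_checks.py (the [[SMUL:%.*]] captures and CHECK-NEXT chains); if later changes shift opt's output, regenerating the expectations with that script is the usual workflow rather than editing them by hand.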