#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
}
- // (X & C2) == 0 -> (trunc X) >= 0
- // (X & C2) != 0 -> (trunc X) < 0
- // iff C2 is a power of 2 and it masks the sign bit of a legal integer type.
- const APInt *C2;
- if (And->hasOneUse() && C.isZero() && match(Y, m_APInt(C2))) {
- int32_t ExactLogBase2 = C2->exactLogBase2();
- if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
- Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
- if (auto *AndVTy = dyn_cast<VectorType>(And->getType()))
- NTy = VectorType::get(NTy, AndVTy->getElementCount());
- Value *Trunc = Builder.CreateTrunc(X, NTy);
- auto NewPred =
- Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE : CmpInst::ICMP_SLT;
- return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
- }
- }
-
return nullptr;
}
static Instruction *foldICmpWithTrunc(ICmpInst &ICmp,
InstCombiner::BuilderTy &Builder) {
- const ICmpInst::Predicate Pred = ICmp.getPredicate();
+ ICmpInst::Predicate Pred = ICmp.getPredicate();
Value *Op0 = ICmp.getOperand(0), *Op1 = ICmp.getOperand(1);
// Try to canonicalize trunc + compare-to-constant into a mask + cmp.
if (!match(Op0, m_OneUse(m_Trunc(m_Value(X)))) || !match(Op1, m_APInt(C)))
return nullptr;
+ // This matches patterns corresponding to tests of the signbit as well as:
+ // (trunc X) u< C --> (X & -C) == 0 (are all masked-high-bits clear?)
+ // (trunc X) u> C --> (X & ~C) != 0 (are any masked-high-bits set?)
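+ // (These fire only when C is a power-of-2 for u< and a low-bit mask for u>.)
+ // For example, with i32 X truncated to i8:
+ //   (trunc X) s< 0 --> (X & 128) != 0
+ //   (trunc X) u< 8 --> (X & 248) == 0 (-8 as i8 is 0xF8, zero-extended to i32)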
+ APInt Mask;
+ if (decomposeBitTestICmp(Op0, Op1, Pred, X, Mask, true /* WithTrunc */)) {
+ Value *And = Builder.CreateAnd(X, Mask);
+ Constant *Zero = ConstantInt::getNullValue(X->getType());
+ return new ICmpInst(Pred, And, Zero);
+ }
+
unsigned SrcBits = X->getType()->getScalarSizeInBits();
- if (Pred == ICmpInst::ICMP_ULT) {
- if (C->isPowerOf2()) {
- // If C is a power-of-2 (one set bit):
- // (trunc X) u< C --> (X & -C) == 0 (are all masked-high-bits clear?)
- Constant *MaskC = ConstantInt::get(X->getType(), (-*C).zext(SrcBits));
- Value *And = Builder.CreateAnd(X, MaskC);
- Constant *Zero = ConstantInt::getNullValue(X->getType());
- return new ICmpInst(ICmpInst::ICMP_EQ, And, Zero);
- }
+ if (Pred == ICmpInst::ICMP_ULT && C->isNegatedPowerOf2()) {
// If C is a negative power-of-2 (high-bit mask):
// (trunc X) u< C --> (X & C) != C (are any masked-high-bits clear?)
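+ // e.g., (trunc i32 X to i8) u< 0xC0 --> (X & 0xC0) != 0xC0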
- if (C->isNegatedPowerOf2()) {
- Constant *MaskC = ConstantInt::get(X->getType(), C->zext(SrcBits));
- Value *And = Builder.CreateAnd(X, MaskC);
- return new ICmpInst(ICmpInst::ICMP_NE, And, MaskC);
- }
+ Constant *MaskC = ConstantInt::get(X->getType(), C->zext(SrcBits));
+ Value *And = Builder.CreateAnd(X, MaskC);
+ return new ICmpInst(ICmpInst::ICMP_NE, And, MaskC);
}
- if (Pred == ICmpInst::ICMP_UGT) {
- // If C is a low-bit-mask (C+1 is a power-of-2):
- // (trunc X) u> C --> (X & ~C) != 0 (are any masked-high-bits set?)
- if (C->isMask()) {
- Constant *MaskC = ConstantInt::get(X->getType(), (~*C).zext(SrcBits));
- Value *And = Builder.CreateAnd(X, MaskC);
- Constant *Zero = ConstantInt::getNullValue(X->getType());
- return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
- }
+ if (Pred == ICmpInst::ICMP_UGT && (~*C).isPowerOf2()) {
// If C is a bitwise-not of a power-of-2 (one clear bit):
// (trunc X) u> C --> (X & (C+1)) == C+1 (are all masked-high-bits set?)
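+ // e.g., (trunc i32 X to i8) u> 0xBF --> (X & 0xC0) == 0xC0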
- if ((~*C).isPowerOf2()) {
- Constant *MaskC = ConstantInt::get(X->getType(), (*C + 1).zext(SrcBits));
- Value *And = Builder.CreateAnd(X, MaskC);
- return new ICmpInst(ICmpInst::ICMP_EQ, And, MaskC);
- }
+ Constant *MaskC = ConstantInt::get(X->getType(), (*C + 1).zext(SrcBits));
+ Value *And = Builder.CreateAnd(X, MaskC);
+ return new ICmpInst(ICmpInst::ICMP_EQ, And, MaskC);
}
return nullptr;
define i1 @test2(i64 %A) {
; CHECK-LABEL: @test2(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i8
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[TMP1]], -1
+; CHECK-NEXT: [[AND:%.*]] = and i64 [[A:%.*]], 128
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[AND]], 0
; CHECK-NEXT: ret i1 [[CMP]]
;
%and = and i64 %A, 128
define <2 x i1> @test2vec(<2 x i64> %A) {
; CHECK-LABEL: @test2vec(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i8>
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <2 x i8> [[TMP1]], <i8 -1, i8 -1>
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[A:%.*]], <i64 128, i64 128>
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i64> [[AND]], zeroinitializer
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%and = and <2 x i64> %A, <i64 128, i64 128>
define i1 @test3(i64 %A) {
; CHECK-LABEL: @test3(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A:%.*]] to i8
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[TMP1]], 0
+; CHECK-NEXT: [[AND:%.*]] = and i64 [[A:%.*]], 128
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[AND]], 0
; CHECK-NEXT: ret i1 [[CMP]]
;
%and = and i64 %A, 128
define <2 x i1> @test3vec(<2 x i64> %A) {
; CHECK-LABEL: @test3vec(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i64> [[A:%.*]] to <2 x i8>
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> [[TMP1]], zeroinitializer
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[A:%.*]], <i64 128, i64 128>
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i64> [[AND]], zeroinitializer
; CHECK-NEXT: ret <2 x i1> [[CMP]]
;
%and = and <2 x i64> %A, <i64 128, i64 128>
ret <2 x i1> %r
}
-; negative test
+; negative test - but this reduces with a mask op
define i1 @shift_trunc_wrong_shift(i32 %x) {
; CHECK-LABEL: @shift_trunc_wrong_shift(
-; CHECK-NEXT: [[SH:%.*]] = lshr i32 [[X:%.*]], 23
-; CHECK-NEXT: [[TR:%.*]] = trunc i32 [[SH]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[TR]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], 1073741824
+; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%sh = lshr i32 %x, 23
; Larger than the pointer size for a non-zero address space
define i1 @test18_as1(i16 addrspace(1)* %P, i32 %I) {
; CHECK-LABEL: @test18_as1(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[I:%.*]] to i16
-; CHECK-NEXT: [[C:%.*]] = icmp slt i16 [[TMP1]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[I:%.*]], 32768
+; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[C]]
;
%X = getelementptr inbounds i16, i16 addrspace(1)* %P, i32 %I
; Smaller than the pointer size for a non-zero address space
define i1 @test18_as1_i32(i16 addrspace(1)* %P, i32 %I) {
; CHECK-LABEL: @test18_as1_i32(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[I:%.*]] to i16
-; CHECK-NEXT: [[C:%.*]] = icmp slt i16 [[TMP1]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[I:%.*]], 32768
+; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[C]]
;
%X = getelementptr inbounds i16, i16 addrspace(1)* %P, i32 %I
; Larger than the pointer size
define i1 @test18_i128(i16* %P, i128 %I) {
; CHECK-LABEL: @test18_i128(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i128 [[I:%.*]] to i64
-; CHECK-NEXT: [[C:%.*]] = icmp slt i64 [[TMP1]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = and i128 [[I:%.*]], 9223372036854775808
+; CHECK-NEXT: [[C:%.*]] = icmp ne i128 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[C]]
;
%X = getelementptr inbounds i16, i16* %P, i128 %I
define i1 @slt_0(i32 %x) {
; CHECK-LABEL: @slt_0(
-; CHECK-NEXT: [[T:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[T]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], 128
+; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%t = trunc i32 %x to i8
define <2 x i1> @slt_0_splat(<2 x i16> %x) {
; CHECK-LABEL: @slt_0_splat(
-; CHECK-NEXT: [[T:%.*]] = trunc <2 x i16> [[X:%.*]] to <2 x i11>
-; CHECK-NEXT: [[R:%.*]] = icmp slt <2 x i11> [[T]], zeroinitializer
+; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i16> [[X:%.*]], <i16 1024, i16 1024>
+; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%t = trunc <2 x i16> %x to <2 x i11>
define i1 @sgt_n1(i32 %x) {
; CHECK-LABEL: @sgt_n1(
-; CHECK-NEXT: [[T:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[T]], -1
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], 128
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[TMP1]], 0
; CHECK-NEXT: ret i1 [[R]]
;
%t = trunc i32 %x to i8
define <2 x i1> @sgt_n1_splat(<2 x i16> %x) {
; CHECK-LABEL: @sgt_n1_splat(
-; CHECK-NEXT: [[T:%.*]] = trunc <2 x i16> [[X:%.*]] to <2 x i11>
-; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i11> [[T]], <i11 -1, i11 -1>
+; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i16> [[X:%.*]], <i16 1024, i16 1024>
+; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i16> [[TMP1]], zeroinitializer
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%t = trunc <2 x i16> %x to <2 x i11>
; Make sure we can still perform this optimization with a truncate present
define i32 @test35_with_trunc(i64 %x) {
; CHECK-LABEL: @test35_with_trunc(
-; CHECK-NEXT: [[X1:%.*]] = trunc i64 [[X:%.*]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X1]], -1
+; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[X:%.*]], 2147483648
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[TMP1]], 0
; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP]], i32 60, i32 100
; CHECK-NEXT: ret i32 [[COND]]
;
define i32 @test73(i32 %x) {
; CHECK-LABEL: @test73(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i8 [[TMP1]], -1
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], 128
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0
; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[TMP2]], i32 40, i32 42
; CHECK-NEXT: ret i32 [[TMP3]]
;
define <2 x i32> @test73vec(<2 x i32> %x) {
; CHECK-LABEL: @test73vec(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[X:%.*]] to <2 x i8>
-; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <2 x i8> [[TMP1]], <i8 -1, i8 -1>
+; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[X:%.*]], <i32 128, i32 128>
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i32> [[TMP1]], zeroinitializer
; CHECK-NEXT: [[TMP3:%.*]] = select <2 x i1> [[TMP2]], <2 x i32> <i32 40, i32 40>, <2 x i32> <i32 42, i32 42>
; CHECK-NEXT: ret <2 x i32> [[TMP3]]
;
define i32 @test68(i32 %x, i32 %y) {
; CHECK-LABEL: @test68(
-; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 6
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 2
-; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP2]], [[Y:%.*]]
-; CHECK-NEXT: ret i32 [[TMP3]]
+; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 6
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[TMP2]]
;
%and = and i32 %x, 128
%cmp = icmp eq i32 %and, 0
define <2 x i32> @test68vec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @test68vec(
-; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 6, i32 6>
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], <i32 2, i32 2>
-; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i32> [[TMP2]], [[Y:%.*]]
-; CHECK-NEXT: ret <2 x i32> [[TMP3]]
+; CHECK-NEXT: [[AND:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 6, i32 6>
+; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], <i32 2, i32 2>
+; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: ret <2 x i32> [[TMP2]]
;
%and = and <2 x i32> %x, <i32 128, i32 128>
%cmp = icmp eq <2 x i32> %and, zeroinitializer
define i32 @test68_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test68_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[TMP1]], -1
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 128
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 2
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[Y]], i32 [[XOR]]
; CHECK-NEXT: ret i32 [[SELECT]]
define i32 @test68_and(i32 %x, i32 %y) {
; CHECK-LABEL: @test68_and(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[TMP1]], -1
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 128
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y:%.*]], -3
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP]], i32 [[Y]], i32 [[AND2]]
; CHECK-NEXT: ret i32 [[SELECT]]
define i32 @test69(i32 %x, i32 %y) {
; CHECK-LABEL: @test69(
-; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 6
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 2
-; CHECK-NEXT: [[TMP3:%.*]] = xor i32 [[TMP2]], 2
-; CHECK-NEXT: [[TMP4:%.*]] = or i32 [[TMP3]], [[Y:%.*]]
-; CHECK-NEXT: ret i32 [[TMP4]]
+; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 6
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], 2
+; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: ret i32 [[TMP3]]
;
%and = and i32 %x, 128
%cmp = icmp ne i32 %and, 0
define <2 x i32> @test69vec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @test69vec(
-; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 6, i32 6>
-; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], <i32 2, i32 2>
-; CHECK-NEXT: [[TMP3:%.*]] = xor <2 x i32> [[TMP2]], <i32 2, i32 2>
-; CHECK-NEXT: [[TMP4:%.*]] = or <2 x i32> [[TMP3]], [[Y:%.*]]
-; CHECK-NEXT: ret <2 x i32> [[TMP4]]
+; CHECK-NEXT: [[AND:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 6, i32 6>
+; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], <i32 2, i32 2>
+; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i32> [[TMP1]], <i32 2, i32 2>
+; CHECK-NEXT: [[TMP3:%.*]] = or <2 x i32> [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: ret <2 x i32> [[TMP3]]
;
%and = and <2 x i32> %x, <i32 128, i32 128>
%cmp = icmp ne <2 x i32> %and, zeroinitializer
define i32 @test69_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test69_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i8 [[TMP1]], -1
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 128
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 2
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP_NOT]], i32 [[XOR]], i32 [[Y]]
; CHECK-NEXT: ret i32 [[SELECT]]
define i32 @test69_and(i32 %x, i32 %y) {
; CHECK-LABEL: @test69_and(
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i8
-; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i8 [[TMP1]], -1
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 128
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[AND2:%.*]] = and i32 [[Y:%.*]], 2
; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[CMP_NOT]], i32 [[AND2]], i32 [[Y]]
; CHECK-NEXT: ret i32 [[SELECT]]
define i1 @positive_different_trunc_both(i32 %arg) {
; CHECK-LABEL: @positive_different_trunc_both(
-; CHECK-NEXT: [[T1:%.*]] = trunc i32 [[ARG:%.*]] to i15
-; CHECK-NEXT: [[T2:%.*]] = icmp sgt i15 [[T1]], -1
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[ARG:%.*]], 16384
+; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[TMP1]], 0
; CHECK-NEXT: [[T3:%.*]] = trunc i32 [[ARG]] to i16
; CHECK-NEXT: [[T4:%.*]] = add i16 [[T3]], 128
; CHECK-NEXT: [[T5:%.*]] = icmp ult i16 [[T4]], 256
define i1 @positive_different_trunc_both_logical(i32 %arg) {
; CHECK-LABEL: @positive_different_trunc_both_logical(
-; CHECK-NEXT: [[T1:%.*]] = trunc i32 [[ARG:%.*]] to i15
-; CHECK-NEXT: [[T2:%.*]] = icmp sgt i15 [[T1]], -1
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[ARG:%.*]], 16384
+; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[TMP1]], 0
; CHECK-NEXT: [[T3:%.*]] = trunc i32 [[ARG]] to i16
; CHECK-NEXT: [[T4:%.*]] = add i16 [[T3]], 128
; CHECK-NEXT: [[T5:%.*]] = icmp ult i16 [[T4]], 256
define i1 @negative_trunc_not_arg(i32 %arg, i32 %arg2) {
; CHECK-LABEL: @negative_trunc_not_arg(
-; CHECK-NEXT: [[T1:%.*]] = trunc i32 [[ARG:%.*]] to i8
-; CHECK-NEXT: [[T2:%.*]] = icmp sgt i8 [[T1]], -1
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[ARG:%.*]], 128
+; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[TMP1]], 0
; CHECK-NEXT: [[T3:%.*]] = add i32 [[ARG2:%.*]], 128
; CHECK-NEXT: [[T4:%.*]] = icmp ult i32 [[T3]], 256
; CHECK-NEXT: [[T5:%.*]] = and i1 [[T2]], [[T4]]
define i1 @negative_trunc_not_arg_logical(i32 %arg, i32 %arg2) {
; CHECK-LABEL: @negative_trunc_not_arg_logical(
-; CHECK-NEXT: [[T1:%.*]] = trunc i32 [[ARG:%.*]] to i8
-; CHECK-NEXT: [[T2:%.*]] = icmp sgt i8 [[T1]], -1
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[ARG:%.*]], 128
+; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[TMP1]], 0
; CHECK-NEXT: [[T3:%.*]] = add i32 [[ARG2:%.*]], 128
; CHECK-NEXT: [[T4:%.*]] = icmp ult i32 [[T3]], 256
; CHECK-NEXT: [[T5:%.*]] = select i1 [[T2]], i1 [[T4]], i1 false