ret i1 %B
}
-define i1 @ashr_icmp2(i64 %X) nounwind {
+define i1 @ashr_icmp2(i64 %X) {
; CHECK-LABEL: @ashr_icmp2(
; CHECK-NEXT: [[Z:%.*]] = icmp slt i64 %X, 16
; CHECK-NEXT: ret i1 [[Z]]
ret i1 %Z
}
+; FIXME: Vectors should fold the same way.
+define <2 x i1> @ashr_icmp2_vec(<2 x i64> %X) {
+; CHECK-LABEL: @ashr_icmp2_vec(
+; CHECK-NEXT: [[Y:%.*]] = ashr exact <2 x i64> %X, <i64 2, i64 2>
+; CHECK-NEXT: [[Z:%.*]] = icmp slt <2 x i64> [[Y]], <i64 4, i64 4>
+; CHECK-NEXT: ret <2 x i1> [[Z]]
+;
+ %Y = ashr exact <2 x i64> %X, <i64 2, i64 2>
+ %Z = icmp slt <2 x i64> %Y, <i64 4, i64 4>
+ ret <2 x i1> %Z
+}
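+
+; Sketch of the expected result once the fold handles vectors, mirroring the
+; scalar @ashr_icmp2 fold above (not verified InstCombine output):
+;   %Z = icmp slt <2 x i64> %X, <i64 16, i64 16>
+;   ret <2 x i1> %Z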
+
; PR9998
; Make sure we don't transform the ashr here into an sdiv
define i1 @pr9998(i32 %V) nounwind {
ret i1 %cmp
}
+; FIXME: Vectors should fold the same way.
+define <2 x i1> @icmp_sext8trunc_vec(<2 x i32> %x) {
+; CHECK-LABEL: @icmp_sext8trunc_vec(
+; CHECK-NEXT: [[SEXT1:%.*]] = shl <2 x i32> %x, <i32 24, i32 24>
+; CHECK-NEXT: [[SEXT:%.*]] = ashr <2 x i32> [[SEXT1]], <i32 24, i32 24>
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i32> [[SEXT]], <i32 36, i32 36>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %trunc = trunc <2 x i32> %x to <2 x i8>
+ %sext = sext <2 x i8> %trunc to <2 x i32>
+ %cmp = icmp slt <2 x i32> %sext, <i32 36, i32 36>
+ ret <2 x i1> %cmp
+}
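+
+; Sketch of the expected vector fold, assuming the scalar trunc+sext pattern
+; narrows the compare into i8 (36 fits in i8); not verified output:
+;   %1 = trunc <2 x i32> %x to <2 x i8>
+;   %cmp = icmp slt <2 x i8> %1, <i8 36, i8 36>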
+
define i1 @icmp_shl16(i32 %x) {
; CHECK-LABEL: @icmp_shl16(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 %x to i16
ret i1 %cmp
}
+; FIXME: Vectors should fold the same way.
+define <2 x i1> @test_shift_and_cmp_changed1_vec(<2 x i8> %p, <2 x i8> %q) {
+; CHECK-LABEL: @test_shift_and_cmp_changed1_vec(
+; CHECK-NEXT: [[ANDP:%.*]] = and <2 x i8> %p, <i8 6, i8 6>
+; CHECK-NEXT: [[ANDQ:%.*]] = and <2 x i8> %q, <i8 8, i8 8>
+; CHECK-NEXT: [[OR:%.*]] = or <2 x i8> [[ANDQ]], [[ANDP]]
+; CHECK-NEXT: [[SHL:%.*]] = shl <2 x i8> [[OR]], <i8 5, i8 5>
+; CHECK-NEXT: [[ASHR:%.*]] = ashr <2 x i8> [[SHL]], <i8 5, i8 5>
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt <2 x i8> [[ASHR]], <i8 1, i8 1>
+; CHECK-NEXT: ret <2 x i1> [[CMP]]
+;
+ %andp = and <2 x i8> %p, <i8 6, i8 6>
+ %andq = and <2 x i8> %q, <i8 8, i8 8>
+ %or = or <2 x i8> %andq, %andp
+ %shl = shl <2 x i8> %or, <i8 5, i8 5>
+ %ashr = ashr <2 x i8> %shl, <i8 5, i8 5>
+ %cmp = icmp slt <2 x i8> %ashr, <i8 1, i8 1>
+ ret <2 x i1> %cmp
+}
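+
+; A plausible folded form, assuming the scalar fold carries over: %q's bit
+; (mask 8) is shifted out of the i8 elements by the shl of 5, so only %p
+; survives, and the shl+ashr+slt 1 pair reduces to a signed compare of the
+; shifted value against 32 (unverified):
+;   %andp = shl <2 x i8> %p, <i8 5, i8 5>
+;   %shl = and <2 x i8> %andp, <i8 -64, i8 -64>
+;   %cmp = icmp slt <2 x i8> %shl, <i8 32, i8 32>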
+
; Unsigned compare allows a transformation to compare against 0.
define i1 @test_shift_and_cmp_changed2(i8 %p) {
; CHECK-LABEL: @test_shift_and_cmp_changed2(
ret i1 %C
}
+; FIXME: Vectors should fold the same way.
+define <2 x i1> @test17vec(<2 x i32> %A) {
+; CHECK-LABEL: @test17vec(
+; CHECK-NEXT: [[B:%.*]] = lshr <2 x i32> %A, <i32 3, i32 3>
+; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i32> [[B]], <i32 1234, i32 1234>
+; CHECK-NEXT: ret <2 x i1> [[C]]
+;
+ %B = lshr <2 x i32> %A, <i32 3, i32 3>
+ %C = icmp eq <2 x i32> %B, <i32 1234, i32 1234>
+ ret <2 x i1> %C
+}
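+
+; Sketch of the expected vector fold, assuming the scalar @test17 result:
+; "lshr by 3 == 1234" becomes a mask compare against 1234 << 3 = 9872
+; (unverified):
+;   %B.mask = and <2 x i32> %A, <i32 -8, i32 -8>
+;   %C = icmp eq <2 x i32> %B.mask, <i32 9872, i32 9872>
+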
define i1 @test18(i8 %A) {
; CHECK-LABEL: @test18(
ret i1 %C
}
+; FIXME: Vectors should fold the same way.
+define <2 x i1> @test19vec(<2 x i32> %A) {
+; CHECK-LABEL: @test19vec(
+; CHECK-NEXT: [[B:%.*]] = ashr <2 x i32> %A, <i32 2, i32 2>
+; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i32> [[B]], zeroinitializer
+; CHECK-NEXT: ret <2 x i1> [[C]]
+;
+ %B = ashr <2 x i32> %A, <i32 2, i32 2>
+ %C = icmp eq <2 x i32> %B, zeroinitializer
+ ret <2 x i1> %C
+}
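+
+; Sketch of the expected vector fold, assuming the scalar @test19 result:
+; "ashr by 2 == 0" holds exactly for values 0..3, i.e. an unsigned compare
+; (unverified):
+;   %C = icmp ult <2 x i32> %A, <i32 4, i32 4>
+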
;; X >u ~4
define i1 @test19a(i32 %A) {