We need to produce a setcc instruction, which has an 8-bit result, so legalize G_ICMP to an s8 result type.
This gets rid of a bunch of cases that were using the s1->s8/s16/s32/s64
handling in selectZExt.
I'm not very familiar with GlobalISel yet, so I'm not sure this is
the best way to do things. I'd especially like feedback on how best
to handle the currently split 32-bit and 64-bit mode handling.
Differential Revision: https://reviews.llvm.org/D85814
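
Regarding the split 32-bit/64-bit question above, one direction (sketched here for discussion only, not part of this patch) would be to express both G_ICMP rules through a single predicate that gates s64 operands on the subtarget. This assumes an Is64Bit flag derived from the X86Subtarget (e.g. Subtarget.is64Bit()) is visible where the rules are built; the exact plumbing may differ.

  // Sketch only: a single G_ICMP rule for both modes; s64 operands are
  // accepted only when Is64Bit is true. The result type stays s8 either way.
  getActionDefinitionsBuilder(G_ICMP)
      .legalIf([=](const LegalityQuery &Query) {
        const LLT OpTy = Query.Types[1];
        return Query.Types[0] == s8 &&
               (OpTy == s8 || OpTy == s16 || OpTy == s32 || OpTy == p0 ||
                (Is64Bit && OpTy == s64));
      })
      .clampScalar(0, s8, s8);

Whether a single predicate reads better than the two separate builder calls below is exactly the kind of feedback being requested.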
.legalFor({{s8, s8}, {s16, s8}, {s32, s8}})
.clampScalar(0, s8, s32)
.clampScalar(1, s8, s8);
+
+ // Comparison
+ getActionDefinitionsBuilder(G_ICMP)
+ .legalForCartesianProduct({s8}, {s8, s16, s32, p0})
+ .clampScalar(0, s8, s8);
}
// Control-flow
setAction({G_ANYEXT, s128}, Legal);
getActionDefinitionsBuilder(G_SEXT_INREG).lower();
- // Comparison
- setAction({G_ICMP, s1}, Legal);
-
- for (auto Ty : {s8, s16, s32, p0})
- setAction({G_ICMP, 1, Ty}, Legal);
-
// Merge/Unmerge
for (const auto &Ty : {s16, s32, s64}) {
setAction({G_MERGE_VALUES, Ty}, Legal);
.widenScalarToNextPow2(1);
// Comparison
- setAction({G_ICMP, 1, s64}, Legal);
+ getActionDefinitionsBuilder(G_ICMP)
+ .legalForCartesianProduct({s8}, {s8, s16, s32, s64, p0})
+ .clampScalar(0, s8, s8);
getActionDefinitionsBuilder(G_FCMP)
.legalForCartesianProduct({s8}, {s32, s64})
define i32 @test_icmp_eq_i8(i8 %a, i8 %b) {
; ALL-LABEL: test_icmp_eq_i8:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpb %sil, %dil
; ALL-NEXT: sete %al
; ALL-NEXT: andl $1, %eax
define i32 @test_icmp_eq_i16(i16 %a, i16 %b) {
; ALL-LABEL: test_icmp_eq_i16:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpw %si, %di
; ALL-NEXT: sete %al
; ALL-NEXT: andl $1, %eax
define i32 @test_icmp_eq_i64(i64 %a, i64 %b) {
; ALL-LABEL: test_icmp_eq_i64:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpq %rsi, %rdi
; ALL-NEXT: sete %al
; ALL-NEXT: andl $1, %eax
define i32 @test_icmp_eq_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_eq_i32:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: sete %al
; ALL-NEXT: andl $1, %eax
define i32 @test_icmp_ne_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_ne_i32:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setne %al
; ALL-NEXT: andl $1, %eax
define i32 @test_icmp_ugt_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_ugt_i32:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: seta %al
; ALL-NEXT: andl $1, %eax
define i32 @test_icmp_uge_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_uge_i32:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setae %al
; ALL-NEXT: andl $1, %eax
define i32 @test_icmp_ult_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_ult_i32:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setb %al
; ALL-NEXT: andl $1, %eax
define i32 @test_icmp_ule_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_ule_i32:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setbe %al
; ALL-NEXT: andl $1, %eax
define i32 @test_icmp_sgt_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_sgt_i32:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setg %al
; ALL-NEXT: andl $1, %eax
define i32 @test_icmp_sge_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_sge_i32:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setge %al
; ALL-NEXT: andl $1, %eax
define i32 @test_icmp_slt_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_slt_i32:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setl %al
; ALL-NEXT: andl $1, %eax
define i32 @test_icmp_sle_i32(i32 %a, i32 %b) {
; ALL-LABEL: test_icmp_sle_i32:
; ALL: # %bb.0:
+; ALL-NEXT: xorl %eax, %eax
; ALL-NEXT: cmpl %esi, %edi
; ALL-NEXT: setle %al
; ALL-NEXT: andl $1, %eax
; CHECK-LABEL: name: test_cmp_i8
; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[COPY1:%[0-9]+]]:_(s8) = COPY $sil
- ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s8), [[COPY1]]
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(ult), [[COPY]](s8), [[COPY1]]
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8)
+ ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]]
+ ; CHECK: $eax = COPY [[AND]](s32)
; CHECK: RET 0, implicit $eax
%0(s8) = COPY $dil
%1(s8) = COPY $sil
; CHECK-LABEL: name: test_cmp_i16
; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY $si
- ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s16), [[COPY1]]
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(ult), [[COPY]](s16), [[COPY1]]
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8)
+ ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]]
+ ; CHECK: $eax = COPY [[AND]](s32)
; CHECK: RET 0, implicit $eax
%0(s16) = COPY $di
%1(s16) = COPY $si
; CHECK-LABEL: name: test_cmp_i32
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
- ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY1]]
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY1]]
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8)
+ ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]]
+ ; CHECK: $eax = COPY [[AND]](s32)
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
; CHECK-LABEL: name: test_cmp_i64
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
- ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY1]]
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY1]]
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8)
+ ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]]
+ ; CHECK: $eax = COPY [[AND]](s32)
; CHECK: RET 0, implicit $eax
%0(s64) = COPY $rdi
%1(s64) = COPY $rsi
; CHECK-LABEL: name: test_cmp_p0
; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsi
- ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](p0), [[COPY1]]
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(ult), [[COPY]](p0), [[COPY1]]
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s8)
+ ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]]
+ ; CHECK: $eax = COPY [[AND]](s32)
; CHECK: RET 0, implicit $eax
%0(p0) = COPY $rdi
%1(p0) = COPY $rsi
; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.2
+ ; ALL: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; ALL: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8)
+ ; ALL: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
+ ; ALL: G_BRCOND [[TRUNC]](s1), %bb.2
; ALL: bb.1.cond.false:
; ALL: successors: %bb.2(0x80000000)
- ; ALL: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
+ ; ALL: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
; ALL: bb.2.cond.end:
- ; ALL: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[TRUNC1]](s8), %bb.1, [[TRUNC]](s8), %bb.0
+ ; ALL: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[TRUNC2]](s8), %bb.1, [[TRUNC1]](s8), %bb.0
; ALL: [[COPY3:%[0-9]+]]:_(s8) = COPY [[PHI]](s8)
; ALL: $al = COPY [[COPY3]](s8)
; ALL: RET 0, implicit $al
; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; ALL: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.2
+ ; ALL: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; ALL: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8)
+ ; ALL: G_BRCOND [[TRUNC2]](s1), %bb.2
; ALL: bb.1.cond.false:
; ALL: successors: %bb.2(0x80000000)
; ALL: bb.2.cond.end:
; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; ALL: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.2
+ ; ALL: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; ALL: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8)
+ ; ALL: G_BRCOND [[TRUNC2]](s1), %bb.2
; ALL: bb.1.cond.false:
; ALL: successors: %bb.2(0x80000000)
; ALL: bb.2.cond.end:
; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
+ ; ALL: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; ALL: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8)
+ ; ALL: G_BRCOND [[TRUNC]](s1), %bb.1
; ALL: G_BR %bb.2
; ALL: bb.1.cond.true:
; ALL: successors: %bb.3(0x80000000)
; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY $rdx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.1
+ ; ALL: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; ALL: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8)
+ ; ALL: G_BRCOND [[TRUNC]](s1), %bb.1
; ALL: G_BR %bb.2
; ALL: bb.1.cond.true:
; ALL: successors: %bb.3(0x80000000)
; ALL: [[COPY2:%[0-9]+]]:_(s128) = COPY $xmm1
; ALL: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY2]](s128)
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.2
+ ; ALL: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; ALL: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8)
+ ; ALL: G_BRCOND [[TRUNC2]](s1), %bb.2
; ALL: bb.1.cond.false:
; ALL: successors: %bb.2(0x80000000)
; ALL: bb.2.cond.end:
; ALL: [[COPY2:%[0-9]+]]:_(s128) = COPY $xmm1
; ALL: [[TRUNC1:%[0-9]+]]:_(s64) = G_TRUNC [[COPY2]](s128)
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
- ; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; ALL: G_BRCOND [[ICMP]](s1), %bb.2
+ ; ALL: [[ICMP:%[0-9]+]]:_(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; ALL: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s8)
+ ; ALL: G_BRCOND [[TRUNC2]](s1), %bb.2
; ALL: bb.1.cond.false:
; ALL: successors: %bb.2(0x80000000)
; ALL: bb.2.cond.end:
; FAST-LABEL: name: test_gep
; FAST: [[DEF:%[0-9]+]]:gpr(p0) = IMPLICIT_DEF
; FAST: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 20
- ; FAST: [[GEP:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[DEF]], [[C]](s32)
+ ; FAST: [[PTR_ADD:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[DEF]], [[C]](s32)
; FAST: [[C1:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 20
- ; FAST: [[GEP1:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[DEF]], [[C1]](s64)
+ ; FAST: [[PTR_ADD1:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[DEF]], [[C1]](s64)
; FAST: RET 0
; GREEDY-LABEL: name: test_gep
; GREEDY: [[DEF:%[0-9]+]]:gpr(p0) = IMPLICIT_DEF
; GREEDY: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 20
- ; GREEDY: [[GEP:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[DEF]], [[C]](s32)
+ ; GREEDY: [[PTR_ADD:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[DEF]], [[C]](s32)
; GREEDY: [[C1:%[0-9]+]]:gpr(s64) = G_CONSTANT i64 20
- ; GREEDY: [[GEP1:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[DEF]], [[C1]](s64)
+ ; GREEDY: [[PTR_ADD1:%[0-9]+]]:gpr(p0) = G_PTR_ADD [[DEF]], [[C1]](s64)
; GREEDY: RET 0
%0(p0) = IMPLICIT_DEF
%1(s32) = G_CONSTANT i32 20
; FAST: [[TRUNC:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY]](s32)
; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; FAST: [[TRUNC1:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY1]](s32)
- ; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s8), [[TRUNC1]]
- ; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; FAST: $al = COPY [[ANYEXT]](s8)
+ ; FAST: [[ICMP:%[0-9]+]]:gpr(s8) = G_ICMP intpred(eq), [[TRUNC]](s8), [[TRUNC1]]
+ ; FAST: [[COPY2:%[0-9]+]]:gpr(s8) = COPY [[ICMP]](s8)
+ ; FAST: $al = COPY [[COPY2]](s8)
; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_icmp_eq_i8
; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
; GREEDY: [[TRUNC:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY]](s32)
; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; GREEDY: [[TRUNC1:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY1]](s32)
- ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s8), [[TRUNC1]]
- ; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; GREEDY: $al = COPY [[ANYEXT]](s8)
+ ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s8) = G_ICMP intpred(eq), [[TRUNC]](s8), [[TRUNC1]]
+ ; GREEDY: [[COPY2:%[0-9]+]]:gpr(s8) = COPY [[ICMP]](s8)
+ ; GREEDY: $al = COPY [[COPY2]](s8)
; GREEDY: RET 0, implicit $al
%2:_(s32) = COPY $edi
%0:_(s8) = G_TRUNC %2(s32)
%3:_(s32) = COPY $esi
%1:_(s8) = G_TRUNC %3(s32)
- %4:_(s1) = G_ICMP intpred(eq), %0(s8), %1
- %5:_(s8) = G_ANYEXT %4(s1)
+ %6:_(s8) = G_ICMP intpred(eq), %0(s8), %1
+ %5:_(s8) = COPY %6(s8)
$al = COPY %5(s8)
RET 0, implicit $al
; FAST: [[TRUNC:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY]](s32)
; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; FAST: [[TRUNC1:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY1]](s32)
- ; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s16), [[TRUNC1]]
- ; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; FAST: $al = COPY [[ANYEXT]](s8)
+ ; FAST: [[ICMP:%[0-9]+]]:gpr(s8) = G_ICMP intpred(eq), [[TRUNC]](s16), [[TRUNC1]]
+ ; FAST: [[COPY2:%[0-9]+]]:gpr(s8) = COPY [[ICMP]](s8)
+ ; FAST: $al = COPY [[COPY2]](s8)
; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_icmp_eq_i16
; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
; GREEDY: [[TRUNC:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY]](s32)
; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; GREEDY: [[TRUNC1:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY1]](s32)
- ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s16), [[TRUNC1]]
- ; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; GREEDY: $al = COPY [[ANYEXT]](s8)
+ ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s8) = G_ICMP intpred(eq), [[TRUNC]](s16), [[TRUNC1]]
+ ; GREEDY: [[COPY2:%[0-9]+]]:gpr(s8) = COPY [[ICMP]](s8)
+ ; GREEDY: $al = COPY [[COPY2]](s8)
; GREEDY: RET 0, implicit $al
%2:_(s32) = COPY $edi
%0:_(s16) = G_TRUNC %2(s32)
%3:_(s32) = COPY $esi
%1:_(s16) = G_TRUNC %3(s32)
- %4:_(s1) = G_ICMP intpred(eq), %0(s16), %1
- %5:_(s8) = G_ANYEXT %4(s1)
+ %6:_(s8) = G_ICMP intpred(eq), %0(s16), %1
+ %5:_(s8) = COPY %6(s8)
$al = COPY %5(s8)
RET 0, implicit $al
; FAST-LABEL: name: test_icmp_eq_i32
; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
- ; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
- ; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; FAST: $al = COPY [[ANYEXT]](s8)
+ ; FAST: [[ICMP:%[0-9]+]]:gpr(s8) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
+ ; FAST: [[COPY2:%[0-9]+]]:gpr(s8) = COPY [[ICMP]](s8)
+ ; FAST: $al = COPY [[COPY2]](s8)
; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_icmp_eq_i32
; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
- ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
- ; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; GREEDY: $al = COPY [[ANYEXT]](s8)
+ ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s8) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
+ ; GREEDY: [[COPY2:%[0-9]+]]:gpr(s8) = COPY [[ICMP]](s8)
+ ; GREEDY: $al = COPY [[COPY2]](s8)
; GREEDY: RET 0, implicit $al
%0:_(s32) = COPY $edi
%1:_(s32) = COPY $esi
- %2:_(s1) = G_ICMP intpred(eq), %0(s32), %1
- %3:_(s8) = G_ANYEXT %2(s1)
+ %4:_(s8) = G_ICMP intpred(eq), %0(s32), %1
+ %3:_(s8) = COPY %4(s8)
$al = COPY %3(s8)
RET 0, implicit $al
; FAST-LABEL: name: test_icmp_eq_i64
; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi
- ; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[COPY1]]
- ; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; FAST: $al = COPY [[ANYEXT]](s8)
+ ; FAST: [[ICMP:%[0-9]+]]:gpr(s8) = G_ICMP intpred(eq), [[COPY]](s64), [[COPY1]]
+ ; FAST: [[COPY2:%[0-9]+]]:gpr(s8) = COPY [[ICMP]](s8)
+ ; FAST: $al = COPY [[COPY2]](s8)
; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_icmp_eq_i64
; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi
- ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[COPY1]]
- ; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; GREEDY: $al = COPY [[ANYEXT]](s8)
+ ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s8) = G_ICMP intpred(eq), [[COPY]](s64), [[COPY1]]
+ ; GREEDY: [[COPY2:%[0-9]+]]:gpr(s8) = COPY [[ICMP]](s8)
+ ; GREEDY: $al = COPY [[COPY2]](s8)
; GREEDY: RET 0, implicit $al
%0:_(s64) = COPY $rdi
%1:_(s64) = COPY $rsi
- %2:_(s1) = G_ICMP intpred(eq), %0(s64), %1
- %3:_(s8) = G_ANYEXT %2(s1)
+ %4:_(s8) = G_ICMP intpred(eq), %0(s64), %1
+ %3:_(s8) = COPY %4(s8)
$al = COPY %3(s8)
RET 0, implicit $al
- { id: 3, class: _, preferred-register: '' }
- { id: 4, class: _, preferred-register: '' }
- { id: 5, class: _, preferred-register: '' }
+ - { id: 6, class: _, preferred-register: '' }
body: |
; FAST-LABEL: name: test_i32
; FAST: bb.0.entry:
; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; FAST: [[COPY2:%[0-9]+]]:gpr(s32) = COPY $edx
; FAST: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
- ; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; FAST: G_BRCOND [[ICMP]](s1), %bb.1
+ ; FAST: [[ICMP:%[0-9]+]]:gpr(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; FAST: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s8)
+ ; FAST: G_BRCOND [[TRUNC]](s1), %bb.1
; FAST: G_BR %bb.2
; FAST: bb.1.cond.true:
; FAST: successors: %bb.3(0x80000000)
; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; GREEDY: [[COPY2:%[0-9]+]]:gpr(s32) = COPY $edx
; GREEDY: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
- ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; GREEDY: G_BRCOND [[ICMP]](s1), %bb.1
+ ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; GREEDY: [[TRUNC:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s8)
+ ; GREEDY: G_BRCOND [[TRUNC]](s1), %bb.1
; GREEDY: G_BR %bb.2
; GREEDY: bb.1.cond.true:
; GREEDY: successors: %bb.3(0x80000000)
%1(s32) = COPY $esi
%2(s32) = COPY $edx
%3(s32) = G_CONSTANT i32 0
- %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
+ %6(s8) = G_ICMP intpred(sgt), %0(s32), %3
+ %4(s1) = G_TRUNC %6(s8)
G_BRCOND %4(s1), %bb.1
G_BR %bb.2
; FAST: [[COPY2:%[0-9]+]]:vecr(s128) = COPY $xmm1
; FAST: [[TRUNC1:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY2]](s128)
; FAST: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
- ; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; FAST: G_BRCOND [[ICMP]](s1), %bb.2
+ ; FAST: [[ICMP:%[0-9]+]]:gpr(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; FAST: [[TRUNC2:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s8)
+ ; FAST: G_BRCOND [[TRUNC2]](s1), %bb.2
; FAST: bb.1.cond.false:
; FAST: successors: %bb.2(0x80000000)
; FAST: bb.2.cond.end:
; GREEDY: [[COPY2:%[0-9]+]]:vecr(s128) = COPY $xmm1
; GREEDY: [[TRUNC1:%[0-9]+]]:vecr(s32) = G_TRUNC [[COPY2]](s128)
; GREEDY: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
- ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
- ; GREEDY: G_BRCOND [[ICMP]](s1), %bb.2
+ ; GREEDY: [[ICMP:%[0-9]+]]:gpr(s8) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
+ ; GREEDY: [[TRUNC2:%[0-9]+]]:gpr(s1) = G_TRUNC [[ICMP]](s8)
+ ; GREEDY: G_BRCOND [[TRUNC2]](s1), %bb.2
; GREEDY: bb.1.cond.false:
; GREEDY: successors: %bb.2(0x80000000)
; GREEDY: bb.2.cond.end:
%4:_(s128) = COPY $xmm1
%2:_(s32) = G_TRUNC %4(s128)
%5:_(s32) = G_CONSTANT i32 0
- %6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
+ %9:_(s8) = G_ICMP intpred(sgt), %0(s32), %5
+ %6:_(s1) = G_TRUNC %9(s8)
G_BRCOND %6(s1), %bb.3
bb.2.cond.false:
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $esi
; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY $sil
; CHECK: CMP8rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s8) = COPY $dil
%1(s8) = COPY $sil
- %2(s1) = G_ICMP intpred(eq), %0(s8), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(eq), %0(s8), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $esi
; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY $si
; CHECK: CMP16rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s16) = COPY $di
%1(s16) = COPY $si
- %2(s1) = G_ICMP intpred(eq), %0(s16), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(eq), %0(s16), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $rdi, $rsi
; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
; CHECK: CMP64rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s64) = COPY $rdi
%1(s64) = COPY $rsi
- %2(s1) = G_ICMP intpred(eq), %0(s64), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(eq), %0(s64), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $esi
; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 4, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s1) = G_ICMP intpred(eq), %0(s32), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(eq), %0(s32), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $esi
; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 5, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s1) = G_ICMP intpred(ne), %0(s32), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(ne), %0(s32), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $esi
; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 7, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s1) = G_ICMP intpred(ugt), %0(s32), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(ugt), %0(s32), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $esi
; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 3, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s1) = G_ICMP intpred(uge), %0(s32), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(uge), %0(s32), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $esi
; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 2, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s1) = G_ICMP intpred(ult), %0(s32), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(ult), %0(s32), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $esi
; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 6, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s1) = G_ICMP intpred(ule), %0(s32), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(ule), %0(s32), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $esi
; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 15, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s1) = G_ICMP intpred(sgt), %0(s32), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(sgt), %0(s32), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $esi
; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 13, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s1) = G_ICMP intpred(sge), %0(s32), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(sge), %0(s32), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $esi
; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 12, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s1) = G_ICMP intpred(slt), %0(s32), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(slt), %0(s32), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- - { id: 2, class: gpr }
+ - { id: 2, class: _ }
- { id: 3, class: gpr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
body: |
bb.1 (%ir-block.0):
liveins: $edi, $esi
; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
; CHECK: [[SETCCr:%[0-9]+]]:gr8 = SETCCr 14, implicit $eflags
- ; CHECK: [[DEF:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:gr32 = INSERT_SUBREG [[DEF]], [[SETCCr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[INSERT_SUBREG]], 1, implicit-def $eflags
+ ; CHECK: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[SETCCr]]
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[MOVZX32rr8_]], 1, implicit-def $eflags
; CHECK: $eax = COPY [[AND32ri8_]]
; CHECK: RET 0, implicit $eax
%0(s32) = COPY $edi
%1(s32) = COPY $esi
- %2(s1) = G_ICMP intpred(sle), %0(s32), %1
- %3(s32) = G_ZEXT %2(s1)
+ %4(s8) = G_ICMP intpred(sle), %0(s32), %1
+ %5(s32) = G_CONSTANT i32 1
+ %6(s32) = G_ANYEXT %4(s8)
+ %3(s32) = G_AND %6, %5
$eax = COPY %3(s32)
RET 0, implicit $eax
%4:gpr(s32) = COPY $edx
%2:gpr(s8) = G_TRUNC %4(s32)
%5:gpr(s32) = G_CONSTANT i32 0
- %6:gpr(s1) = G_ICMP intpred(sgt), %0(s32), %5
+ %8:gpr(s8) = G_ICMP intpred(sgt), %0(s32), %5
+ %6:gpr(s1) = G_TRUNC %8(s8)
G_BRCOND %6(s1), %bb.3
bb.2.cond.false:
%4:gpr(s32) = COPY $edx
%2:gpr(s16) = G_TRUNC %4(s32)
%5:gpr(s32) = G_CONSTANT i32 0
- %6:gpr(s1) = G_ICMP intpred(sgt), %0(s32), %5
+ %8:gpr(s8) = G_ICMP intpred(sgt), %0(s32), %5
+ %6:gpr(s1) = G_TRUNC %8(s8)
G_BRCOND %6(s1), %bb.3
bb.2.cond.false:
- { id: 3, class: gpr, preferred-register: '' }
- { id: 4, class: gpr, preferred-register: '' }
- { id: 5, class: gpr, preferred-register: '' }
+ - { id: 6, class: gpr, preferred-register: '' }
body: |
; ALL-LABEL: name: test_i32
; ALL: bb.0.entry:
%1(s32) = COPY $esi
%2(s32) = COPY $edx
%3(s32) = G_CONSTANT i32 0
- %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
+ %6(s8) = G_ICMP intpred(sgt), %0(s32), %3
+ %4:gpr(s1) = G_TRUNC %6(s8)
G_BRCOND %4(s1), %bb.2
G_BR %bb.3
- { id: 3, class: gpr, preferred-register: '' }
- { id: 4, class: gpr, preferred-register: '' }
- { id: 5, class: gpr, preferred-register: '' }
+ - { id: 6, class: gpr, preferred-register: '' }
body: |
; ALL-LABEL: name: test_i64
; ALL: bb.0.entry:
%1(s64) = COPY $rsi
%2(s64) = COPY $rdx
%3(s32) = G_CONSTANT i32 0
- %4(s1) = G_ICMP intpred(sgt), %0(s32), %3
+ %6(s8) = G_ICMP intpred(sgt), %0(s32), %3
+ %4:gpr(s1) = G_TRUNC %6(s8)
G_BRCOND %4(s1), %bb.2
G_BR %bb.3
%4:vecr(s128) = COPY $xmm1
%2:vecr(s32) = G_TRUNC %4(s128)
%5:gpr(s32) = G_CONSTANT i32 0
- %6:gpr(s1) = G_ICMP intpred(sgt), %0(s32), %5
+ %9:gpr(s8) = G_ICMP intpred(sgt), %0(s32), %5
+ %6:gpr(s1) = G_TRUNC %9(s8)
G_BRCOND %6(s1), %bb.3
bb.2.cond.false:
%4:vecr(s128) = COPY $xmm1
%2:vecr(s64) = G_TRUNC %4(s128)
%5:gpr(s32) = G_CONSTANT i32 0
- %6:gpr(s1) = G_ICMP intpred(sgt), %0(s32), %5
+ %9:gpr(s8) = G_ICMP intpred(sgt), %0(s32), %5
+ %6:gpr(s1) = G_TRUNC %9(s8)
G_BRCOND %6(s1), %bb.3
bb.2.cond.false: