br label %loop.body3
}
; NOTE(review): this is a diff hunk over an LLVM IR regression test. The `-`/`+`
; lines replace every `undef` operand with the newly added function arguments
; %p0 (i32*) and %a0 (i32); comparison predicates, constants, and control flow
; are unchanged by the diff.
-define void @unnatural_cfg2() {
+define void @unnatural_cfg2(i32* %p0, i32 %a0) {
; Test that we can handle a loop with a nested natural loop *and* an unnatural
; loop. This was reduced from a crash on block placement when run over
; single-source GCC.
br label %loop.header
loop.header:
; Loop guard: bail out on a null pointer, otherwise enter the loop body.
- %comp0 = icmp eq i32* undef, null
+ %comp0 = icmp eq i32* %p0, null
br i1 %comp0, label %bail, label %loop.body1
loop.body1:
loop.inner1.begin:
; NOTE(review): %val0/%val1/%val2 have no defining instructions in the visible
; lines, and loop.body1 falls straight into loop.inner1.begin with no
; terminator — presumably artifacts of test reduction/excerpting; confirm
; against the full test file before relying on this IR verifying.
%valphi = phi i32* [ %val2, %loop.inner1.end ], [ %val1, %loop.body3 ], [ %val0, %loop.body1 ]
; Same-type bitcast (i32* -> i32*): a no-op kept from the reduction.
%castval = bitcast i32* %valphi to i32*
- %comp1 = icmp eq i32 undef, 48
+ %comp1 = icmp eq i32 %a0, 48
br i1 %comp1, label %loop.inner1.end, label %loop.body4
loop.inner1.end:
br label %loop.body4
loop.body4:
- %comp2 = icmp ult i32 undef, 3
+ %comp2 = icmp ult i32 %a0, 3
br i1 %comp2, label %loop.inner2.begin, label %loop.end
loop.inner2.begin:
; Constant-false branch: loop.end on true edge is statically dead here,
; which is part of the unnatural-CFG shape this test exercises.
br i1 false, label %loop.end, label %loop.inner2.end
loop.inner2.end:
- %comp3 = icmp eq i32 undef, 1769472
+ %comp3 = icmp eq i32 %a0, 1769472
br i1 %comp3, label %loop.end, label %loop.inner2.begin
loop.end:
; NOTE(review): `ret i32 %if` disagrees with the `void` return type above and
; %if is not defined in the visible lines — looks like a reduction/excerpt
; artifact; verify against the original test file.
ret i32 %if
}
-define i32 @t3(i32 %x) nounwind readonly {
+define i32 @t3(i32 %x, i64 %y) nounwind readonly {
; X32-LABEL: t3:
; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: cmpl $1, {{[0-9]+}}(%esp)
-; X32-NEXT: sbbl %eax, %eax
-; X32-NEXT: cmpl %eax, %eax
-; X32-NEXT: sbbl %eax, %eax
+; X32-NEXT: sbbl %ecx, %ecx
+; X32-NEXT: cmpl %ecx, {{[0-9]+}}(%esp)
+; X32-NEXT: sbbl %ecx, %eax
; X32-NEXT: xorl %eax, %eax
; X32-NEXT: retl
;
; X64-NEXT: testl %edi, %edi
; X64-NEXT: sete %al
; X64-NEXT: negq %rax
-; X64-NEXT: cmpq %rax, %rax
+; X64-NEXT: cmpq %rax, %rsi
; X64-NEXT: xorl %eax, %eax
; X64-NEXT: retq
entry:
%cond = sext i1 %not.tobool to i32
%conv = sext i1 %not.tobool to i64
%add13 = add i64 0, %conv
- %cmp = icmp ult i64 undef, %add13
+ %cmp = icmp ult i64 %y, %add13
br i1 %cmp, label %if.then, label %if.end
if.then: