; LINUX-NEXT: .cfi_offset %r14, -32
; LINUX-NEXT: .cfi_offset %r15, -24
; LINUX-NEXT: .cfi_offset %rbp, -16
+; LINUX-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; LINUX-NEXT: movb %al, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
; LINUX-NEXT: movq %r9, %r15
; LINUX-NEXT: movq %r8, %r12
; LINUX-NEXT: movq %rcx, %r13
; LINUX-NEXT: movq %rdx, %rbp
; LINUX-NEXT: movq %rsi, %rbx
; LINUX-NEXT: movq %rdi, %r14
-; LINUX-NEXT: movb %al, {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Spill
+; LINUX-NEXT: movq %rsi, {{[0-9]+}}(%rsp)
+; LINUX-NEXT: movq %rdx, {{[0-9]+}}(%rsp)
+; LINUX-NEXT: movq %rcx, {{[0-9]+}}(%rsp)
+; LINUX-NEXT: movq %r8, {{[0-9]+}}(%rsp)
+; LINUX-NEXT: movq %r9, {{[0-9]+}}(%rsp)
; LINUX-NEXT: testb %al, %al
; LINUX-NEXT: je .LBB0_2
; LINUX-NEXT: # %bb.1:
; LINUX-NEXT: movaps %xmm6, {{[0-9]+}}(%rsp)
; LINUX-NEXT: movaps %xmm7, {{[0-9]+}}(%rsp)
; LINUX-NEXT: .LBB0_2:
-; LINUX-NEXT: movq %rbx, {{[0-9]+}}(%rsp)
-; LINUX-NEXT: movq %rbp, {{[0-9]+}}(%rsp)
-; LINUX-NEXT: movq %r13, {{[0-9]+}}(%rsp)
-; LINUX-NEXT: movq %r12, {{[0-9]+}}(%rsp)
-; LINUX-NEXT: movq %r15, {{[0-9]+}}(%rsp)
; LINUX-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; LINUX-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; LINUX-NEXT: leaq {{[0-9]+}}(%rsp), %rax
; LINUX-NEXT: movq %rax, {{[0-9]+}}(%rsp)
; LINUX-NEXT: movabsq $206158430216, %rax # imm = 0x3000000008
; LINUX-NEXT: movq %rax, {{[0-9]+}}(%rsp)
-; LINUX-NEXT: movq %r14, %rdi
-; LINUX-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; LINUX-NEXT: callq get_f
+; LINUX-NEXT: callq get_f@PLT
; LINUX-NEXT: movq %rax, %r11
; LINUX-NEXT: movq %r14, %rdi
; LINUX-NEXT: movq %rbx, %rsi
; LINUX-X32-NEXT: .cfi_offset %r14, -32
; LINUX-X32-NEXT: .cfi_offset %r15, -24
; LINUX-X32-NEXT: .cfi_offset %rbp, -16
+; LINUX-X32-NEXT: movaps %xmm7, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT: movaps %xmm6, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT: movaps %xmm5, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT: movaps %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT: movaps %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT: movaps %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT: movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
+; LINUX-X32-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
; LINUX-X32-NEXT: movq %r9, %r15
; LINUX-X32-NEXT: movq %r8, %r12
; LINUX-X32-NEXT: movq %rcx, %r13
; LINUX-X32-NEXT: movq %rdx, %rbp
; LINUX-X32-NEXT: movq %rsi, %rbx
; LINUX-X32-NEXT: movq %rdi, %r14
-; LINUX-X32-NEXT: movb %al, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Spill
+; LINUX-X32-NEXT: movq %rsi, {{[0-9]+}}(%esp)
+; LINUX-X32-NEXT: movq %rdx, {{[0-9]+}}(%esp)
+; LINUX-X32-NEXT: movq %rcx, {{[0-9]+}}(%esp)
+; LINUX-X32-NEXT: movq %r8, {{[0-9]+}}(%esp)
+; LINUX-X32-NEXT: movq %r9, {{[0-9]+}}(%esp)
; LINUX-X32-NEXT: testb %al, %al
; LINUX-X32-NEXT: je .LBB0_2
; LINUX-X32-NEXT: # %bb.1:
; LINUX-X32-NEXT: movaps %xmm6, {{[0-9]+}}(%esp)
; LINUX-X32-NEXT: movaps %xmm7, {{[0-9]+}}(%esp)
; LINUX-X32-NEXT: .LBB0_2:
-; LINUX-X32-NEXT: movq %rbx, {{[0-9]+}}(%esp)
-; LINUX-X32-NEXT: movq %rbp, {{[0-9]+}}(%esp)
-; LINUX-X32-NEXT: movq %r13, {{[0-9]+}}(%esp)
-; LINUX-X32-NEXT: movq %r12, {{[0-9]+}}(%esp)
-; LINUX-X32-NEXT: movq %r15, {{[0-9]+}}(%esp)
; LINUX-X32-NEXT: leal {{[0-9]+}}(%rsp), %eax
; LINUX-X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
; LINUX-X32-NEXT: leal {{[0-9]+}}(%rsp), %eax
; LINUX-X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
; LINUX-X32-NEXT: movabsq $206158430216, %rax # imm = 0x3000000008
; LINUX-X32-NEXT: movq %rax, {{[0-9]+}}(%esp)
-; LINUX-X32-NEXT: movq %r14, %rdi
-; LINUX-X32-NEXT: movaps %xmm7, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT: movaps %xmm6, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT: movaps %xmm5, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT: movaps %xmm4, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT: movaps %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT: movaps %xmm2, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT: movaps %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT: movaps %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
-; LINUX-X32-NEXT: callq get_f
+; LINUX-X32-NEXT: callq get_f@PLT
; LINUX-X32-NEXT: movl %eax, %r11d
; LINUX-X32-NEXT: movq %r14, %rdi
; LINUX-X32-NEXT: movq %rbx, %rsi
-; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -mattr=-sse | FileCheck %s -check-prefix=CHECK -check-prefix=NOSSE
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-linux-gnux32 | FileCheck %s -check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -mattr=-sse | FileCheck %s -check-prefix=NOSSE
+; RUN: llc < %s -mtriple=i386-linux-gnux32 | FileCheck %s -check-prefix=32BITABI
+; RUN: llc < %s -mtriple=i686-linux-gnux32 | FileCheck %s -check-prefix=32BITABI
;
; Verifies that x32 va_start lowering is sane. To regenerate this test, use
; cat <<EOF |
%struct.__va_list_tag = type { i32, i32, i8*, i8* }
define i32 @foo(float %a, i8* nocapture readnone %fmt, ...) nounwind {
+; SSE-LABEL: foo:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: subl $72, %esp
+; SSE-NEXT: movq %rsi, -{{[0-9]+}}(%esp)
+; SSE-NEXT: movq %rdx, -{{[0-9]+}}(%esp)
+; SSE-NEXT: movq %rcx, -{{[0-9]+}}(%esp)
+; SSE-NEXT: movq %r8, -{{[0-9]+}}(%esp)
+; SSE-NEXT: movq %r9, -{{[0-9]+}}(%esp)
+; SSE-NEXT: testb %al, %al
+; SSE-NEXT: je .LBB0_5
+; SSE-NEXT: # %bb.4: # %entry
+; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%esp)
+; SSE-NEXT: movaps %xmm2, -{{[0-9]+}}(%esp)
+; SSE-NEXT: movaps %xmm3, -{{[0-9]+}}(%esp)
+; SSE-NEXT: movaps %xmm4, (%esp)
+; SSE-NEXT: movaps %xmm5, {{[0-9]+}}(%esp)
+; SSE-NEXT: movaps %xmm6, {{[0-9]+}}(%esp)
+; SSE-NEXT: movaps %xmm7, {{[0-9]+}}(%esp)
+; SSE-NEXT: .LBB0_5: # %entry
+; SSE-NEXT: leal -{{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movl %eax, -{{[0-9]+}}(%esp)
+; SSE-NEXT: leal {{[0-9]+}}(%rsp), %eax
+; SSE-NEXT: movl %eax, -{{[0-9]+}}(%esp)
+; SSE-NEXT: movabsq $274877906952, %rax # imm = 0x4000000008
+; SSE-NEXT: movq %rax, -{{[0-9]+}}(%esp)
+; SSE-NEXT: movl $8, %ecx
+; SSE-NEXT: cmpl $40, %ecx
+; SSE-NEXT: ja .LBB0_2
+; SSE-NEXT: # %bb.1: # %vaarg.in_reg
+; SSE-NEXT: movl -{{[0-9]+}}(%esp), %eax
+; SSE-NEXT: addl %ecx, %eax
+; SSE-NEXT: addl $8, %ecx
+; SSE-NEXT: movl %ecx, -{{[0-9]+}}(%esp)
+; SSE-NEXT: jmp .LBB0_3
+; SSE-NEXT: .LBB0_2: # %vaarg.in_mem
+; SSE-NEXT: movl -{{[0-9]+}}(%esp), %eax
+; SSE-NEXT: leal 8(%rax), %ecx
+; SSE-NEXT: movl %ecx, -{{[0-9]+}}(%esp)
+; SSE-NEXT: .LBB0_3: # %vaarg.end
+; SSE-NEXT: movl (%eax), %eax
+; SSE-NEXT: addl $72, %esp
+; SSE-NEXT: retq
+;
+; NOSSE-LABEL: foo:
+; NOSSE: # %bb.0: # %entry
+; NOSSE-NEXT: movq %rsi, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT: movq %rdx, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT: movq %rcx, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT: movq %r8, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT: movq %r9, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT: leal -{{[0-9]+}}(%rsp), %eax
+; NOSSE-NEXT: movl %eax, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT: leal {{[0-9]+}}(%rsp), %eax
+; NOSSE-NEXT: movl %eax, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT: movabsq $206158430216, %rax # imm = 0x3000000008
+; NOSSE-NEXT: movq %rax, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl $8, %ecx
+; NOSSE-NEXT: cmpl $40, %ecx
+; NOSSE-NEXT: ja .LBB0_2
+; NOSSE-NEXT: # %bb.1: # %vaarg.in_reg
+; NOSSE-NEXT: movl -{{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: addl %ecx, %eax
+; NOSSE-NEXT: addl $8, %ecx
+; NOSSE-NEXT: movl %ecx, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl (%eax), %eax
+; NOSSE-NEXT: retq
+; NOSSE-NEXT: .LBB0_2: # %vaarg.in_mem
+; NOSSE-NEXT: movl -{{[0-9]+}}(%esp), %eax
+; NOSSE-NEXT: leal 8(%rax), %ecx
+; NOSSE-NEXT: movl %ecx, -{{[0-9]+}}(%esp)
+; NOSSE-NEXT: movl (%eax), %eax
+; NOSSE-NEXT: retq
+;
+; 32BITABI-LABEL: foo:
+; 32BITABI: # %bb.0: # %entry
+; 32BITABI-NEXT: subl $28, %esp
+; 32BITABI-NEXT: leal {{[0-9]+}}(%esp), %ecx
+; 32BITABI-NEXT: movl %ecx, (%esp)
+; 32BITABI-NEXT: cmpl $40, %ecx
+; 32BITABI-NEXT: ja .LBB0_2
+; 32BITABI-NEXT: # %bb.1: # %vaarg.in_reg
+; 32BITABI-NEXT: movl {{[0-9]+}}(%esp), %eax
+; 32BITABI-NEXT: addl %ecx, %eax
+; 32BITABI-NEXT: addl $8, %ecx
+; 32BITABI-NEXT: movl %ecx, (%esp)
+; 32BITABI-NEXT: jmp .LBB0_3
+; 32BITABI-NEXT: .LBB0_2: # %vaarg.in_mem
+; 32BITABI-NEXT: movl {{[0-9]+}}(%esp), %eax
+; 32BITABI-NEXT: leal 8(%eax), %ecx
+; 32BITABI-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; 32BITABI-NEXT: .LBB0_3: # %vaarg.end
+; 32BITABI-NEXT: movl (%eax), %eax
+; 32BITABI-NEXT: addl $28, %esp
+; 32BITABI-NEXT: retl
entry:
%ap = alloca [1 x %struct.__va_list_tag], align 16
%0 = bitcast [1 x %struct.__va_list_tag]* %ap to i8*
call void @llvm.lifetime.start.p0i8(i64 16, i8* %0) #2
call void @llvm.va_start(i8* %0)
-; SSE: subl $72, %esp
-; SSE: testb %al, %al
-; SSE: je .[[NOFP:.*]]
-; SSE-DAG: movaps %xmm1
-; SSE-DAG: movaps %xmm2
-; SSE-DAG: movaps %xmm3
-; SSE-DAG: movaps %xmm4
-; SSE-DAG: movaps %xmm5
-; SSE-DAG: movaps %xmm6
-; SSE-DAG: movaps %xmm7
-; NOSSE-NOT: xmm
-; SSE: .[[NOFP]]:
-; CHECK-DAG: movq %r9
-; CHECK-DAG: movq %r8
-; CHECK-DAG: movq %rcx
-; CHECK-DAG: movq %rdx
-; CHECK-DAG: movq %rsi
%gp_offset_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 0
%gp_offset = load i32, i32* %gp_offset_p, align 16
%fits_in_gp = icmp ult i32 %gp_offset, 41
br i1 %fits_in_gp, label %vaarg.in_reg, label %vaarg.in_mem
-; CHECK: cmpl $40, [[COUNT:.*]]
-; CHECK: ja .[[IN_MEM:.*]]
vaarg.in_reg: ; preds = %entry
%1 = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 3
; Restored: load the reg_save_area pointer and index it by gp_offset to get
; the in-register argument's address. Defines %2, consumed by the phi in
; %vaarg.end; without these two lines the function fails the IR verifier.
%reg_save_area = load i8*, i8** %1, align 16
%2 = getelementptr i8, i8* %reg_save_area, i32 %gp_offset
%3 = add i32 %gp_offset, 8
store i32 %3, i32* %gp_offset_p, align 16
br label %vaarg.end
-; CHECK: movl {{[^,]*}}, [[ADDR:.*]]
-; CHECK: addl [[COUNT]], [[ADDR]]
-; SSE: jmp .[[END:.*]]
-; NOSSE: movl ([[ADDR]]), %eax
-; NOSSE: retq
-; CHECK: .[[IN_MEM]]:
vaarg.in_mem: ; preds = %entry
%overflow_arg_area_p = getelementptr inbounds [1 x %struct.__va_list_tag], [1 x %struct.__va_list_tag]* %ap, i32 0, i32 0, i32 2
%overflow_arg_area = load i8*, i8** %overflow_arg_area_p, align 8
%overflow_arg_area.next = getelementptr i8, i8* %overflow_arg_area, i32 8
store i8* %overflow_arg_area.next, i8** %overflow_arg_area_p, align 8
br label %vaarg.end
-; CHECK: movl {{[^,]*}}, [[ADDR]]
-; NOSSE: movl ([[ADDR]]), %eax
-; NOSSE: retq
-; SSE: .[[END]]:
vaarg.end: ; preds = %vaarg.in_mem, %vaarg.in_reg
%vaarg.addr.in = phi i8* [ %2, %vaarg.in_reg ], [ %overflow_arg_area, %vaarg.in_mem ]
; Restored: reinterpret the va_arg slot as i32* and load the value returned
; below. Defines %4, used by the ret; previously undefined in this chunk.
%vaarg.addr = bitcast i8* %vaarg.addr.in to i32*
%4 = load i32, i32* %vaarg.addr, align 4
call void @llvm.va_end(i8* %0)
call void @llvm.lifetime.end.p0i8(i64 16, i8* %0) #2
ret i32 %4
-; SSE: movl ([[ADDR]]), %eax
-; SSE: retq
}
; Function Attrs: nounwind argmemonly
; CHECK-X64-NEXT: pushq %rbx
; CHECK-X64-NEXT: subq $224, %rsp
; CHECK-X64-NEXT: testb %al, %al
-; CHECK-X64-NEXT: je LBB0_2
-; CHECK-X64-NEXT: ## %bb.1: ## %entry
+; CHECK-X64-NEXT: je LBB0_47
+; CHECK-X64-NEXT: ## %bb.46: ## %entry
; CHECK-X64-NEXT: movaps %xmm0, 96(%rsp)
; CHECK-X64-NEXT: movaps %xmm1, 112(%rsp)
; CHECK-X64-NEXT: movaps %xmm2, 128(%rsp)
; CHECK-X64-NEXT: movaps %xmm5, 176(%rsp)
; CHECK-X64-NEXT: movaps %xmm6, 192(%rsp)
; CHECK-X64-NEXT: movaps %xmm7, 208(%rsp)
-; CHECK-X64-NEXT: LBB0_2: ## %entry
+; CHECK-X64-NEXT: LBB0_47: ## %entry
; CHECK-X64-NEXT: movq %rdi, 48(%rsp)
; CHECK-X64-NEXT: movq %rsi, 56(%rsp)
; CHECK-X64-NEXT: movq %rdx, 64(%rsp)
; CHECK-X64-NEXT: movq %rax, 16(%rsp)
; CHECK-X64-NEXT: movl (%rsp), %ecx
; CHECK-X64-NEXT: cmpl $48, %ecx
-; CHECK-X64-NEXT: jae LBB0_4
-; CHECK-X64-NEXT: ## %bb.3: ## %entry
+; CHECK-X64-NEXT: jae LBB0_2
+; CHECK-X64-NEXT: ## %bb.1: ## %entry
; CHECK-X64-NEXT: movq 16(%rsp), %rax
; CHECK-X64-NEXT: addq %rcx, %rax
; CHECK-X64-NEXT: addl $8, %ecx
; CHECK-X64-NEXT: movl %ecx, (%rsp)
-; CHECK-X64-NEXT: jmp LBB0_5
-; CHECK-X64-NEXT: LBB0_4: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_3
+; CHECK-X64-NEXT: LBB0_2: ## %entry
; CHECK-X64-NEXT: movq 8(%rsp), %rax
; CHECK-X64-NEXT: movq %rax, %rcx
; CHECK-X64-NEXT: addq $8, %rcx
; CHECK-X64-NEXT: movq %rcx, 8(%rsp)
-; CHECK-X64-NEXT: LBB0_5: ## %entry
+; CHECK-X64-NEXT: LBB0_3: ## %entry
; CHECK-X64-NEXT: movl (%rax), %r10d
; CHECK-X64-NEXT: movl (%rsp), %ecx
; CHECK-X64-NEXT: cmpl $48, %ecx
-; CHECK-X64-NEXT: jae LBB0_7
-; CHECK-X64-NEXT: ## %bb.6: ## %entry
+; CHECK-X64-NEXT: jae LBB0_5
+; CHECK-X64-NEXT: ## %bb.4: ## %entry
; CHECK-X64-NEXT: movq 16(%rsp), %rax
; CHECK-X64-NEXT: addq %rcx, %rax
; CHECK-X64-NEXT: addl $8, %ecx
; CHECK-X64-NEXT: movl %ecx, (%rsp)
-; CHECK-X64-NEXT: jmp LBB0_8
-; CHECK-X64-NEXT: LBB0_7: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_6
+; CHECK-X64-NEXT: LBB0_5: ## %entry
; CHECK-X64-NEXT: movq 8(%rsp), %rax
; CHECK-X64-NEXT: movq %rax, %rcx
; CHECK-X64-NEXT: addq $8, %rcx
; CHECK-X64-NEXT: movq %rcx, 8(%rsp)
-; CHECK-X64-NEXT: LBB0_8: ## %entry
+; CHECK-X64-NEXT: LBB0_6: ## %entry
; CHECK-X64-NEXT: movl (%rax), %r11d
; CHECK-X64-NEXT: movl (%rsp), %ecx
; CHECK-X64-NEXT: cmpl $48, %ecx
-; CHECK-X64-NEXT: jae LBB0_10
-; CHECK-X64-NEXT: ## %bb.9: ## %entry
+; CHECK-X64-NEXT: jae LBB0_8
+; CHECK-X64-NEXT: ## %bb.7: ## %entry
; CHECK-X64-NEXT: movq 16(%rsp), %rax
; CHECK-X64-NEXT: addq %rcx, %rax
; CHECK-X64-NEXT: addl $8, %ecx
; CHECK-X64-NEXT: movl %ecx, (%rsp)
-; CHECK-X64-NEXT: jmp LBB0_11
-; CHECK-X64-NEXT: LBB0_10: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_9
+; CHECK-X64-NEXT: LBB0_8: ## %entry
; CHECK-X64-NEXT: movq 8(%rsp), %rax
; CHECK-X64-NEXT: movq %rax, %rcx
; CHECK-X64-NEXT: addq $8, %rcx
; CHECK-X64-NEXT: movq %rcx, 8(%rsp)
-; CHECK-X64-NEXT: LBB0_11: ## %entry
+; CHECK-X64-NEXT: LBB0_9: ## %entry
; CHECK-X64-NEXT: movl (%rax), %r9d
; CHECK-X64-NEXT: movq 16(%rsp), %rax
; CHECK-X64-NEXT: movq %rax, 40(%rsp)
; CHECK-X64-NEXT: movq %rax, 24(%rsp)
; CHECK-X64-NEXT: movl 4(%rsp), %eax
; CHECK-X64-NEXT: cmpl $176, %eax
-; CHECK-X64-NEXT: jae LBB0_13
-; CHECK-X64-NEXT: ## %bb.12: ## %entry
+; CHECK-X64-NEXT: jae LBB0_11
+; CHECK-X64-NEXT: ## %bb.10: ## %entry
; CHECK-X64-NEXT: addl $16, %eax
; CHECK-X64-NEXT: movl %eax, 4(%rsp)
-; CHECK-X64-NEXT: jmp LBB0_14
-; CHECK-X64-NEXT: LBB0_13: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_12
+; CHECK-X64-NEXT: LBB0_11: ## %entry
; CHECK-X64-NEXT: movq 8(%rsp), %rax
; CHECK-X64-NEXT: addq $8, %rax
; CHECK-X64-NEXT: movq %rax, 8(%rsp)
-; CHECK-X64-NEXT: LBB0_14: ## %entry
+; CHECK-X64-NEXT: LBB0_12: ## %entry
; CHECK-X64-NEXT: movl 28(%rsp), %ecx
; CHECK-X64-NEXT: cmpl $176, %ecx
-; CHECK-X64-NEXT: jae LBB0_16
-; CHECK-X64-NEXT: ## %bb.15: ## %entry
+; CHECK-X64-NEXT: jae LBB0_14
+; CHECK-X64-NEXT: ## %bb.13: ## %entry
; CHECK-X64-NEXT: movq 40(%rsp), %rax
; CHECK-X64-NEXT: addq %rcx, %rax
; CHECK-X64-NEXT: addl $16, %ecx
; CHECK-X64-NEXT: movl %ecx, 28(%rsp)
-; CHECK-X64-NEXT: jmp LBB0_17
-; CHECK-X64-NEXT: LBB0_16: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_15
+; CHECK-X64-NEXT: LBB0_14: ## %entry
; CHECK-X64-NEXT: movq 32(%rsp), %rax
; CHECK-X64-NEXT: movq %rax, %rcx
; CHECK-X64-NEXT: addq $8, %rcx
; CHECK-X64-NEXT: movq %rcx, 32(%rsp)
-; CHECK-X64-NEXT: LBB0_17: ## %entry
+; CHECK-X64-NEXT: LBB0_15: ## %entry
; CHECK-X64-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-X64-NEXT: movl (%rsp), %ecx
; CHECK-X64-NEXT: cmpl $48, %ecx
-; CHECK-X64-NEXT: jae LBB0_19
-; CHECK-X64-NEXT: ## %bb.18: ## %entry
+; CHECK-X64-NEXT: jae LBB0_17
+; CHECK-X64-NEXT: ## %bb.16: ## %entry
; CHECK-X64-NEXT: movq 16(%rsp), %rax
; CHECK-X64-NEXT: addq %rcx, %rax
; CHECK-X64-NEXT: addl $8, %ecx
; CHECK-X64-NEXT: movl %ecx, (%rsp)
-; CHECK-X64-NEXT: jmp LBB0_20
-; CHECK-X64-NEXT: LBB0_19: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_18
+; CHECK-X64-NEXT: LBB0_17: ## %entry
; CHECK-X64-NEXT: movq 8(%rsp), %rax
; CHECK-X64-NEXT: movq %rax, %rcx
; CHECK-X64-NEXT: addq $8, %rcx
; CHECK-X64-NEXT: movq %rcx, 8(%rsp)
-; CHECK-X64-NEXT: LBB0_20: ## %entry
+; CHECK-X64-NEXT: LBB0_18: ## %entry
; CHECK-X64-NEXT: movl (%rax), %r8d
; CHECK-X64-NEXT: movl 24(%rsp), %eax
; CHECK-X64-NEXT: cmpl $48, %eax
-; CHECK-X64-NEXT: jae LBB0_22
-; CHECK-X64-NEXT: ## %bb.21: ## %entry
+; CHECK-X64-NEXT: jae LBB0_20
+; CHECK-X64-NEXT: ## %bb.19: ## %entry
; CHECK-X64-NEXT: addl $8, %eax
; CHECK-X64-NEXT: movl %eax, 24(%rsp)
-; CHECK-X64-NEXT: jmp LBB0_23
-; CHECK-X64-NEXT: LBB0_22: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_21
+; CHECK-X64-NEXT: LBB0_20: ## %entry
; CHECK-X64-NEXT: movq 32(%rsp), %rax
; CHECK-X64-NEXT: addq $8, %rax
; CHECK-X64-NEXT: movq %rax, 32(%rsp)
-; CHECK-X64-NEXT: LBB0_23: ## %entry
+; CHECK-X64-NEXT: LBB0_21: ## %entry
; CHECK-X64-NEXT: movl (%rsp), %eax
; CHECK-X64-NEXT: cmpl $48, %eax
-; CHECK-X64-NEXT: jae LBB0_25
-; CHECK-X64-NEXT: ## %bb.24: ## %entry
+; CHECK-X64-NEXT: jae LBB0_23
+; CHECK-X64-NEXT: ## %bb.22: ## %entry
; CHECK-X64-NEXT: addl $8, %eax
; CHECK-X64-NEXT: movl %eax, (%rsp)
-; CHECK-X64-NEXT: jmp LBB0_26
-; CHECK-X64-NEXT: LBB0_25: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_24
+; CHECK-X64-NEXT: LBB0_23: ## %entry
; CHECK-X64-NEXT: movq 8(%rsp), %rax
; CHECK-X64-NEXT: addq $8, %rax
; CHECK-X64-NEXT: movq %rax, 8(%rsp)
-; CHECK-X64-NEXT: LBB0_26: ## %entry
+; CHECK-X64-NEXT: LBB0_24: ## %entry
; CHECK-X64-NEXT: movl 24(%rsp), %ecx
; CHECK-X64-NEXT: cmpl $48, %ecx
-; CHECK-X64-NEXT: jae LBB0_28
-; CHECK-X64-NEXT: ## %bb.27: ## %entry
+; CHECK-X64-NEXT: jae LBB0_26
+; CHECK-X64-NEXT: ## %bb.25: ## %entry
; CHECK-X64-NEXT: movq 40(%rsp), %rax
; CHECK-X64-NEXT: addq %rcx, %rax
; CHECK-X64-NEXT: addl $8, %ecx
; CHECK-X64-NEXT: movl %ecx, 24(%rsp)
-; CHECK-X64-NEXT: jmp LBB0_29
-; CHECK-X64-NEXT: LBB0_28: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_27
+; CHECK-X64-NEXT: LBB0_26: ## %entry
; CHECK-X64-NEXT: movq 32(%rsp), %rax
; CHECK-X64-NEXT: movq %rax, %rcx
; CHECK-X64-NEXT: addq $8, %rcx
; CHECK-X64-NEXT: movq %rcx, 32(%rsp)
-; CHECK-X64-NEXT: LBB0_29: ## %entry
+; CHECK-X64-NEXT: LBB0_27: ## %entry
; CHECK-X64-NEXT: movq (%rax), %rcx
; CHECK-X64-NEXT: movl (%rsp), %edx
; CHECK-X64-NEXT: cmpl $48, %edx
-; CHECK-X64-NEXT: jae LBB0_31
-; CHECK-X64-NEXT: ## %bb.30: ## %entry
+; CHECK-X64-NEXT: jae LBB0_29
+; CHECK-X64-NEXT: ## %bb.28: ## %entry
; CHECK-X64-NEXT: movq 16(%rsp), %rax
; CHECK-X64-NEXT: addq %rdx, %rax
; CHECK-X64-NEXT: addl $8, %edx
; CHECK-X64-NEXT: movl %edx, (%rsp)
-; CHECK-X64-NEXT: jmp LBB0_32
-; CHECK-X64-NEXT: LBB0_31: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_30
+; CHECK-X64-NEXT: LBB0_29: ## %entry
; CHECK-X64-NEXT: movq 8(%rsp), %rax
; CHECK-X64-NEXT: movq %rax, %rdx
; CHECK-X64-NEXT: addq $8, %rdx
; CHECK-X64-NEXT: movq %rdx, 8(%rsp)
-; CHECK-X64-NEXT: LBB0_32: ## %entry
+; CHECK-X64-NEXT: LBB0_30: ## %entry
; CHECK-X64-NEXT: movl (%rax), %edx
; CHECK-X64-NEXT: movl 24(%rsp), %eax
; CHECK-X64-NEXT: cmpl $48, %eax
-; CHECK-X64-NEXT: jae LBB0_34
-; CHECK-X64-NEXT: ## %bb.33: ## %entry
+; CHECK-X64-NEXT: jae LBB0_32
+; CHECK-X64-NEXT: ## %bb.31: ## %entry
; CHECK-X64-NEXT: addl $8, %eax
; CHECK-X64-NEXT: movl %eax, 24(%rsp)
-; CHECK-X64-NEXT: jmp LBB0_35
-; CHECK-X64-NEXT: LBB0_34: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_33
+; CHECK-X64-NEXT: LBB0_32: ## %entry
; CHECK-X64-NEXT: movq 32(%rsp), %rax
; CHECK-X64-NEXT: addq $8, %rax
; CHECK-X64-NEXT: movq %rax, 32(%rsp)
-; CHECK-X64-NEXT: LBB0_35: ## %entry
+; CHECK-X64-NEXT: LBB0_33: ## %entry
; CHECK-X64-NEXT: movl 4(%rsp), %eax
; CHECK-X64-NEXT: cmpl $176, %eax
-; CHECK-X64-NEXT: jae LBB0_37
-; CHECK-X64-NEXT: ## %bb.36: ## %entry
+; CHECK-X64-NEXT: jae LBB0_35
+; CHECK-X64-NEXT: ## %bb.34: ## %entry
; CHECK-X64-NEXT: addl $16, %eax
; CHECK-X64-NEXT: movl %eax, 4(%rsp)
-; CHECK-X64-NEXT: jmp LBB0_38
-; CHECK-X64-NEXT: LBB0_37: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_36
+; CHECK-X64-NEXT: LBB0_35: ## %entry
; CHECK-X64-NEXT: movq 8(%rsp), %rax
; CHECK-X64-NEXT: addq $8, %rax
; CHECK-X64-NEXT: movq %rax, 8(%rsp)
-; CHECK-X64-NEXT: LBB0_38: ## %entry
+; CHECK-X64-NEXT: LBB0_36: ## %entry
; CHECK-X64-NEXT: movl 28(%rsp), %esi
; CHECK-X64-NEXT: cmpl $176, %esi
-; CHECK-X64-NEXT: jae LBB0_40
-; CHECK-X64-NEXT: ## %bb.39: ## %entry
+; CHECK-X64-NEXT: jae LBB0_38
+; CHECK-X64-NEXT: ## %bb.37: ## %entry
; CHECK-X64-NEXT: movq 40(%rsp), %rax
; CHECK-X64-NEXT: addq %rsi, %rax
; CHECK-X64-NEXT: addl $16, %esi
; CHECK-X64-NEXT: movl %esi, 28(%rsp)
-; CHECK-X64-NEXT: jmp LBB0_41
-; CHECK-X64-NEXT: LBB0_40: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_39
+; CHECK-X64-NEXT: LBB0_38: ## %entry
; CHECK-X64-NEXT: movq 32(%rsp), %rax
; CHECK-X64-NEXT: movq %rax, %rsi
; CHECK-X64-NEXT: addq $8, %rsi
; CHECK-X64-NEXT: movq %rsi, 32(%rsp)
-; CHECK-X64-NEXT: LBB0_41: ## %entry
+; CHECK-X64-NEXT: LBB0_39: ## %entry
; CHECK-X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-X64-NEXT: movl (%rsp), %esi
; CHECK-X64-NEXT: cmpl $48, %esi
-; CHECK-X64-NEXT: jae LBB0_43
-; CHECK-X64-NEXT: ## %bb.42: ## %entry
+; CHECK-X64-NEXT: jae LBB0_41
+; CHECK-X64-NEXT: ## %bb.40: ## %entry
; CHECK-X64-NEXT: movq 16(%rsp), %rax
; CHECK-X64-NEXT: addq %rsi, %rax
; CHECK-X64-NEXT: addl $8, %esi
; CHECK-X64-NEXT: movl %esi, (%rsp)
-; CHECK-X64-NEXT: jmp LBB0_44
-; CHECK-X64-NEXT: LBB0_43: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_42
+; CHECK-X64-NEXT: LBB0_41: ## %entry
; CHECK-X64-NEXT: movq 8(%rsp), %rax
; CHECK-X64-NEXT: movq %rax, %rsi
; CHECK-X64-NEXT: addq $8, %rsi
; CHECK-X64-NEXT: movq %rsi, 8(%rsp)
-; CHECK-X64-NEXT: LBB0_44: ## %entry
+; CHECK-X64-NEXT: LBB0_42: ## %entry
; CHECK-X64-NEXT: movl (%rax), %esi
; CHECK-X64-NEXT: movl 24(%rsp), %eax
; CHECK-X64-NEXT: cmpl $48, %eax
-; CHECK-X64-NEXT: jae LBB0_46
-; CHECK-X64-NEXT: ## %bb.45: ## %entry
+; CHECK-X64-NEXT: jae LBB0_44
+; CHECK-X64-NEXT: ## %bb.43: ## %entry
; CHECK-X64-NEXT: addl $8, %eax
; CHECK-X64-NEXT: movl %eax, 24(%rsp)
-; CHECK-X64-NEXT: jmp LBB0_47
-; CHECK-X64-NEXT: LBB0_46: ## %entry
+; CHECK-X64-NEXT: jmp LBB0_45
+; CHECK-X64-NEXT: LBB0_44: ## %entry
; CHECK-X64-NEXT: movq 32(%rsp), %rax
; CHECK-X64-NEXT: addq $8, %rax
; CHECK-X64-NEXT: movq %rax, 32(%rsp)
-; CHECK-X64-NEXT: LBB0_47: ## %entry
+; CHECK-X64-NEXT: LBB0_45: ## %entry
; CHECK-X64-NEXT: movabsq $_.str, %rdi
; CHECK-X64-NEXT: movabsq $_printf, %rbx
; CHECK-X64-NEXT: movb $2, %al
; CHECK-X32: # %bb.0: # %entry
; CHECK-X32-NEXT: subl $216, %esp
; CHECK-X32-NEXT: testb %al, %al
-; CHECK-X32-NEXT: je .LBB0_2
-; CHECK-X32-NEXT: # %bb.1: # %entry
+; CHECK-X32-NEXT: je .LBB0_47
+; CHECK-X32-NEXT: # %bb.46: # %entry
; CHECK-X32-NEXT: movaps %xmm0, 80(%esp)
; CHECK-X32-NEXT: movaps %xmm1, 96(%esp)
; CHECK-X32-NEXT: movaps %xmm2, 112(%esp)
; CHECK-X32-NEXT: movaps %xmm5, 160(%esp)
; CHECK-X32-NEXT: movaps %xmm6, 176(%esp)
; CHECK-X32-NEXT: movaps %xmm7, 192(%esp)
-; CHECK-X32-NEXT: .LBB0_2: # %entry
+; CHECK-X32-NEXT: .LBB0_47: # %entry
; CHECK-X32-NEXT: movq %rdi, 32(%esp)
; CHECK-X32-NEXT: movq %rsi, 40(%esp)
; CHECK-X32-NEXT: movq %rdx, 48(%esp)
; CHECK-X32-NEXT: movl %eax, 12(%esp)
; CHECK-X32-NEXT: movl (%esp), %ecx
; CHECK-X32-NEXT: cmpl $48, %ecx
-; CHECK-X32-NEXT: jae .LBB0_4
-; CHECK-X32-NEXT: # %bb.3: # %entry
+; CHECK-X32-NEXT: jae .LBB0_2
+; CHECK-X32-NEXT: # %bb.1: # %entry
; CHECK-X32-NEXT: movl 12(%esp), %eax
; CHECK-X32-NEXT: addl %ecx, %eax
; CHECK-X32-NEXT: addl $8, %ecx
; CHECK-X32-NEXT: movl %ecx, (%esp)
-; CHECK-X32-NEXT: jmp .LBB0_5
-; CHECK-X32-NEXT: .LBB0_4: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_3
+; CHECK-X32-NEXT: .LBB0_2: # %entry
; CHECK-X32-NEXT: movl 8(%esp), %eax
; CHECK-X32-NEXT: movl %eax, %ecx
; CHECK-X32-NEXT: addl $8, %ecx
; CHECK-X32-NEXT: movl %ecx, 8(%esp)
-; CHECK-X32-NEXT: .LBB0_5: # %entry
+; CHECK-X32-NEXT: .LBB0_3: # %entry
; CHECK-X32-NEXT: movl (%eax), %r10d
; CHECK-X32-NEXT: movl (%esp), %ecx
; CHECK-X32-NEXT: cmpl $48, %ecx
-; CHECK-X32-NEXT: jae .LBB0_7
-; CHECK-X32-NEXT: # %bb.6: # %entry
+; CHECK-X32-NEXT: jae .LBB0_5
+; CHECK-X32-NEXT: # %bb.4: # %entry
; CHECK-X32-NEXT: movl 12(%esp), %eax
; CHECK-X32-NEXT: addl %ecx, %eax
; CHECK-X32-NEXT: addl $8, %ecx
; CHECK-X32-NEXT: movl %ecx, (%esp)
-; CHECK-X32-NEXT: jmp .LBB0_8
-; CHECK-X32-NEXT: .LBB0_7: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_6
+; CHECK-X32-NEXT: .LBB0_5: # %entry
; CHECK-X32-NEXT: movl 8(%esp), %eax
; CHECK-X32-NEXT: movl %eax, %ecx
; CHECK-X32-NEXT: addl $8, %ecx
; CHECK-X32-NEXT: movl %ecx, 8(%esp)
-; CHECK-X32-NEXT: .LBB0_8: # %entry
+; CHECK-X32-NEXT: .LBB0_6: # %entry
; CHECK-X32-NEXT: movl (%eax), %r11d
; CHECK-X32-NEXT: movl (%esp), %ecx
; CHECK-X32-NEXT: cmpl $48, %ecx
-; CHECK-X32-NEXT: jae .LBB0_10
-; CHECK-X32-NEXT: # %bb.9: # %entry
+; CHECK-X32-NEXT: jae .LBB0_8
+; CHECK-X32-NEXT: # %bb.7: # %entry
; CHECK-X32-NEXT: movl 12(%esp), %eax
; CHECK-X32-NEXT: addl %ecx, %eax
; CHECK-X32-NEXT: addl $8, %ecx
; CHECK-X32-NEXT: movl %ecx, (%esp)
-; CHECK-X32-NEXT: jmp .LBB0_11
-; CHECK-X32-NEXT: .LBB0_10: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_9
+; CHECK-X32-NEXT: .LBB0_8: # %entry
; CHECK-X32-NEXT: movl 8(%esp), %eax
; CHECK-X32-NEXT: movl %eax, %ecx
; CHECK-X32-NEXT: addl $8, %ecx
; CHECK-X32-NEXT: movl %ecx, 8(%esp)
-; CHECK-X32-NEXT: .LBB0_11: # %entry
+; CHECK-X32-NEXT: .LBB0_9: # %entry
; CHECK-X32-NEXT: movl (%eax), %r9d
; CHECK-X32-NEXT: movq (%esp), %rax
; CHECK-X32-NEXT: movq 8(%esp), %rcx
; CHECK-X32-NEXT: movq %rax, 16(%esp)
; CHECK-X32-NEXT: movl 4(%esp), %eax
; CHECK-X32-NEXT: cmpl $176, %eax
-; CHECK-X32-NEXT: jae .LBB0_13
-; CHECK-X32-NEXT: # %bb.12: # %entry
+; CHECK-X32-NEXT: jae .LBB0_11
+; CHECK-X32-NEXT: # %bb.10: # %entry
; CHECK-X32-NEXT: addl $16, %eax
; CHECK-X32-NEXT: movl %eax, 4(%esp)
-; CHECK-X32-NEXT: jmp .LBB0_14
-; CHECK-X32-NEXT: .LBB0_13: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_12
+; CHECK-X32-NEXT: .LBB0_11: # %entry
; CHECK-X32-NEXT: movl 8(%esp), %eax
; CHECK-X32-NEXT: addl $8, %eax
; CHECK-X32-NEXT: movl %eax, 8(%esp)
-; CHECK-X32-NEXT: .LBB0_14: # %entry
+; CHECK-X32-NEXT: .LBB0_12: # %entry
; CHECK-X32-NEXT: movl 20(%esp), %ecx
; CHECK-X32-NEXT: cmpl $176, %ecx
-; CHECK-X32-NEXT: jae .LBB0_16
-; CHECK-X32-NEXT: # %bb.15: # %entry
+; CHECK-X32-NEXT: jae .LBB0_14
+; CHECK-X32-NEXT: # %bb.13: # %entry
; CHECK-X32-NEXT: movl 28(%esp), %eax
; CHECK-X32-NEXT: addl %ecx, %eax
; CHECK-X32-NEXT: addl $16, %ecx
; CHECK-X32-NEXT: movl %ecx, 20(%esp)
-; CHECK-X32-NEXT: jmp .LBB0_17
-; CHECK-X32-NEXT: .LBB0_16: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_15
+; CHECK-X32-NEXT: .LBB0_14: # %entry
; CHECK-X32-NEXT: movl 24(%esp), %eax
; CHECK-X32-NEXT: movl %eax, %ecx
; CHECK-X32-NEXT: addl $8, %ecx
; CHECK-X32-NEXT: movl %ecx, 24(%esp)
-; CHECK-X32-NEXT: .LBB0_17: # %entry
+; CHECK-X32-NEXT: .LBB0_15: # %entry
; CHECK-X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-X32-NEXT: movl (%esp), %ecx
; CHECK-X32-NEXT: cmpl $48, %ecx
-; CHECK-X32-NEXT: jae .LBB0_19
-; CHECK-X32-NEXT: # %bb.18: # %entry
+; CHECK-X32-NEXT: jae .LBB0_17
+; CHECK-X32-NEXT: # %bb.16: # %entry
; CHECK-X32-NEXT: movl 12(%esp), %eax
; CHECK-X32-NEXT: addl %ecx, %eax
; CHECK-X32-NEXT: addl $8, %ecx
; CHECK-X32-NEXT: movl %ecx, (%esp)
-; CHECK-X32-NEXT: jmp .LBB0_20
-; CHECK-X32-NEXT: .LBB0_19: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_18
+; CHECK-X32-NEXT: .LBB0_17: # %entry
; CHECK-X32-NEXT: movl 8(%esp), %eax
; CHECK-X32-NEXT: movl %eax, %ecx
; CHECK-X32-NEXT: addl $8, %ecx
; CHECK-X32-NEXT: movl %ecx, 8(%esp)
-; CHECK-X32-NEXT: .LBB0_20: # %entry
+; CHECK-X32-NEXT: .LBB0_18: # %entry
; CHECK-X32-NEXT: movl (%eax), %r8d
; CHECK-X32-NEXT: movl 16(%esp), %eax
; CHECK-X32-NEXT: cmpl $48, %eax
-; CHECK-X32-NEXT: jae .LBB0_22
-; CHECK-X32-NEXT: # %bb.21: # %entry
+; CHECK-X32-NEXT: jae .LBB0_20
+; CHECK-X32-NEXT: # %bb.19: # %entry
; CHECK-X32-NEXT: addl $8, %eax
; CHECK-X32-NEXT: movl %eax, 16(%esp)
-; CHECK-X32-NEXT: jmp .LBB0_23
-; CHECK-X32-NEXT: .LBB0_22: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_21
+; CHECK-X32-NEXT: .LBB0_20: # %entry
; CHECK-X32-NEXT: movl 24(%esp), %eax
; CHECK-X32-NEXT: addl $8, %eax
; CHECK-X32-NEXT: movl %eax, 24(%esp)
-; CHECK-X32-NEXT: .LBB0_23: # %entry
+; CHECK-X32-NEXT: .LBB0_21: # %entry
; CHECK-X32-NEXT: movl (%esp), %eax
; CHECK-X32-NEXT: cmpl $48, %eax
-; CHECK-X32-NEXT: jae .LBB0_25
-; CHECK-X32-NEXT: # %bb.24: # %entry
+; CHECK-X32-NEXT: jae .LBB0_23
+; CHECK-X32-NEXT: # %bb.22: # %entry
; CHECK-X32-NEXT: addl $8, %eax
; CHECK-X32-NEXT: movl %eax, (%esp)
-; CHECK-X32-NEXT: jmp .LBB0_26
-; CHECK-X32-NEXT: .LBB0_25: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_24
+; CHECK-X32-NEXT: .LBB0_23: # %entry
; CHECK-X32-NEXT: movl 8(%esp), %eax
; CHECK-X32-NEXT: addl $8, %eax
; CHECK-X32-NEXT: movl %eax, 8(%esp)
-; CHECK-X32-NEXT: .LBB0_26: # %entry
+; CHECK-X32-NEXT: .LBB0_24: # %entry
; CHECK-X32-NEXT: movl 16(%esp), %ecx
; CHECK-X32-NEXT: cmpl $48, %ecx
-; CHECK-X32-NEXT: jae .LBB0_28
-; CHECK-X32-NEXT: # %bb.27: # %entry
+; CHECK-X32-NEXT: jae .LBB0_26
+; CHECK-X32-NEXT: # %bb.25: # %entry
; CHECK-X32-NEXT: movl 28(%esp), %eax
; CHECK-X32-NEXT: addl %ecx, %eax
; CHECK-X32-NEXT: addl $8, %ecx
; CHECK-X32-NEXT: movl %ecx, 16(%esp)
-; CHECK-X32-NEXT: jmp .LBB0_29
-; CHECK-X32-NEXT: .LBB0_28: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_27
+; CHECK-X32-NEXT: .LBB0_26: # %entry
; CHECK-X32-NEXT: movl 24(%esp), %eax
; CHECK-X32-NEXT: movl %eax, %ecx
; CHECK-X32-NEXT: addl $8, %ecx
; CHECK-X32-NEXT: movl %ecx, 24(%esp)
-; CHECK-X32-NEXT: .LBB0_29: # %entry
+; CHECK-X32-NEXT: .LBB0_27: # %entry
; CHECK-X32-NEXT: movq (%eax), %rcx
; CHECK-X32-NEXT: movl (%esp), %edx
; CHECK-X32-NEXT: cmpl $48, %edx
-; CHECK-X32-NEXT: jae .LBB0_31
-; CHECK-X32-NEXT: # %bb.30: # %entry
+; CHECK-X32-NEXT: jae .LBB0_29
+; CHECK-X32-NEXT: # %bb.28: # %entry
; CHECK-X32-NEXT: movl 12(%esp), %eax
; CHECK-X32-NEXT: addl %edx, %eax
; CHECK-X32-NEXT: addl $8, %edx
; CHECK-X32-NEXT: movl %edx, (%esp)
-; CHECK-X32-NEXT: jmp .LBB0_32
-; CHECK-X32-NEXT: .LBB0_31: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_30
+; CHECK-X32-NEXT: .LBB0_29: # %entry
; CHECK-X32-NEXT: movl 8(%esp), %eax
; CHECK-X32-NEXT: movl %eax, %edx
; CHECK-X32-NEXT: addl $8, %edx
; CHECK-X32-NEXT: movl %edx, 8(%esp)
-; CHECK-X32-NEXT: .LBB0_32: # %entry
+; CHECK-X32-NEXT: .LBB0_30: # %entry
; CHECK-X32-NEXT: movl (%eax), %edx
; CHECK-X32-NEXT: movl 16(%esp), %eax
; CHECK-X32-NEXT: cmpl $48, %eax
-; CHECK-X32-NEXT: jae .LBB0_34
-; CHECK-X32-NEXT: # %bb.33: # %entry
+; CHECK-X32-NEXT: jae .LBB0_32
+; CHECK-X32-NEXT: # %bb.31: # %entry
; CHECK-X32-NEXT: addl $8, %eax
; CHECK-X32-NEXT: movl %eax, 16(%esp)
-; CHECK-X32-NEXT: jmp .LBB0_35
-; CHECK-X32-NEXT: .LBB0_34: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_33
+; CHECK-X32-NEXT: .LBB0_32: # %entry
; CHECK-X32-NEXT: movl 24(%esp), %eax
; CHECK-X32-NEXT: addl $8, %eax
; CHECK-X32-NEXT: movl %eax, 24(%esp)
-; CHECK-X32-NEXT: .LBB0_35: # %entry
+; CHECK-X32-NEXT: .LBB0_33: # %entry
; CHECK-X32-NEXT: movl 4(%esp), %eax
; CHECK-X32-NEXT: cmpl $176, %eax
-; CHECK-X32-NEXT: jae .LBB0_37
-; CHECK-X32-NEXT: # %bb.36: # %entry
+; CHECK-X32-NEXT: jae .LBB0_35
+; CHECK-X32-NEXT: # %bb.34: # %entry
; CHECK-X32-NEXT: addl $16, %eax
; CHECK-X32-NEXT: movl %eax, 4(%esp)
-; CHECK-X32-NEXT: jmp .LBB0_38
-; CHECK-X32-NEXT: .LBB0_37: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_36
+; CHECK-X32-NEXT: .LBB0_35: # %entry
; CHECK-X32-NEXT: movl 8(%esp), %eax
; CHECK-X32-NEXT: addl $8, %eax
; CHECK-X32-NEXT: movl %eax, 8(%esp)
-; CHECK-X32-NEXT: .LBB0_38: # %entry
+; CHECK-X32-NEXT: .LBB0_36: # %entry
; CHECK-X32-NEXT: movl 20(%esp), %esi
; CHECK-X32-NEXT: cmpl $176, %esi
-; CHECK-X32-NEXT: jae .LBB0_40
-; CHECK-X32-NEXT: # %bb.39: # %entry
+; CHECK-X32-NEXT: jae .LBB0_38
+; CHECK-X32-NEXT: # %bb.37: # %entry
; CHECK-X32-NEXT: movl 28(%esp), %eax
; CHECK-X32-NEXT: addl %esi, %eax
; CHECK-X32-NEXT: addl $16, %esi
; CHECK-X32-NEXT: movl %esi, 20(%esp)
-; CHECK-X32-NEXT: jmp .LBB0_41
-; CHECK-X32-NEXT: .LBB0_40: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_39
+; CHECK-X32-NEXT: .LBB0_38: # %entry
; CHECK-X32-NEXT: movl 24(%esp), %eax
; CHECK-X32-NEXT: movl %eax, %esi
; CHECK-X32-NEXT: addl $8, %esi
; CHECK-X32-NEXT: movl %esi, 24(%esp)
-; CHECK-X32-NEXT: .LBB0_41: # %entry
+; CHECK-X32-NEXT: .LBB0_39: # %entry
; CHECK-X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-X32-NEXT: movl (%esp), %esi
; CHECK-X32-NEXT: cmpl $48, %esi
-; CHECK-X32-NEXT: jae .LBB0_43
-; CHECK-X32-NEXT: # %bb.42: # %entry
+; CHECK-X32-NEXT: jae .LBB0_41
+; CHECK-X32-NEXT: # %bb.40: # %entry
; CHECK-X32-NEXT: movl 12(%esp), %eax
; CHECK-X32-NEXT: addl %esi, %eax
; CHECK-X32-NEXT: addl $8, %esi
; CHECK-X32-NEXT: movl %esi, (%esp)
-; CHECK-X32-NEXT: jmp .LBB0_44
-; CHECK-X32-NEXT: .LBB0_43: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_42
+; CHECK-X32-NEXT: .LBB0_41: # %entry
; CHECK-X32-NEXT: movl 8(%esp), %eax
; CHECK-X32-NEXT: movl %eax, %esi
; CHECK-X32-NEXT: addl $8, %esi
; CHECK-X32-NEXT: movl %esi, 8(%esp)
-; CHECK-X32-NEXT: .LBB0_44: # %entry
+; CHECK-X32-NEXT: .LBB0_42: # %entry
; CHECK-X32-NEXT: movl (%eax), %esi
; CHECK-X32-NEXT: movl 16(%esp), %eax
; CHECK-X32-NEXT: cmpl $48, %eax
-; CHECK-X32-NEXT: jae .LBB0_46
-; CHECK-X32-NEXT: # %bb.45: # %entry
+; CHECK-X32-NEXT: jae .LBB0_44
+; CHECK-X32-NEXT: # %bb.43: # %entry
; CHECK-X32-NEXT: addl $8, %eax
; CHECK-X32-NEXT: movl %eax, 16(%esp)
-; CHECK-X32-NEXT: jmp .LBB0_47
-; CHECK-X32-NEXT: .LBB0_46: # %entry
+; CHECK-X32-NEXT: jmp .LBB0_45
+; CHECK-X32-NEXT: .LBB0_44: # %entry
; CHECK-X32-NEXT: movl 24(%esp), %eax
; CHECK-X32-NEXT: addl $8, %eax
; CHECK-X32-NEXT: movl %eax, 24(%esp)
-; CHECK-X32-NEXT: .LBB0_47: # %entry
+; CHECK-X32-NEXT: .LBB0_45: # %entry
; CHECK-X32-NEXT: movl $.str, %edi
; CHECK-X32-NEXT: movb $2, %al
; CHECK-X32-NEXT: pushq %r10
; CHECK-X32-NEXT: movl $-10, %ecx
; CHECK-X32-NEXT: movl $120, %r9d
; CHECK-X32-NEXT: movb $2, %al
-; CHECK-X32-NEXT: callq func
+; CHECK-X32-NEXT: callq func@PLT
; CHECK-X32-NEXT: xorl %eax, %eax
; CHECK-X32-NEXT: popq %rcx
; CHECK-X32-NEXT: retq