-; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 %s -o - -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=ENABLE
-; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu %s -o - -enable-shrink-wrap=false -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=DISABLE
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu -mcpu=pwr8 %s -o - -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,ENABLE,CHECK-64,ENABLE-64
+; RUN: llc -mtriple=powerpc64le-unknown-linux-gnu %s -o - -enable-shrink-wrap=false -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,DISABLE,CHECK-64,DISABLE-64
+; RUN: llc -mtriple=powerpc-ibm-aix-xcoff -mattr=-altivec -mcpu=pwr8 %s -o - -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,ENABLE,CHECK-32,ENABLE-32
+; RUN: llc -mtriple=powerpc-ibm-aix-xcoff %s -o - -enable-shrink-wrap=false -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,DISABLE,CHECK-32,DISABLE-32
+; RUN: llc -mtriple=powerpc64-ibm-aix-xcoff -mattr=-altivec -mcpu=pwr8 %s -o - -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,ENABLE,CHECK-64,ENABLE-64
+; RUN: llc -mtriple=powerpc64-ibm-aix-xcoff %s -o - -enable-shrink-wrap=false -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,DISABLE,CHECK-64,DISABLE-64
+;
;
; Note: Lots of tests use inline asm instead of regular calls.
; This allows us to have better control over what the allocation will do.
; Initial motivating example: Simple diamond with a call just on one side.
-; CHECK-LABEL: foo:
+; CHECK-LABEL: {{.*}}foo:
;
; Compare the arguments and return
; No prologue needed.
;
; Prologue code.
; At a minimum, we save/restore the link register. Other registers may be saved
-; as well.
-; CHECK: mflr
+; as well.
+; CHECK: mflr
;
; Compare the arguments and jump to exit.
; After the prologue is set.
; DISABLE: cmpw 3, 4
-; DISABLE-NEXT: bge 0, .[[EXIT_LABEL:LBB[0-9_]+]]
+; DISABLE-NEXT: bge 0, {{.*}}[[EXIT_LABEL:BB[0-9_]+]]
;
; Store %a on the stack
; CHECK: stw 3, {{[0-9]+\([0-9]+\)}}
; CHECK-NEXT: addi 4, 1, {{[0-9]+}}
; Set the first argument to zero.
; CHECK-NEXT: li 3, 0
-; CHECK-NEXT: bl doSomething
+; CHECK-NEXT: bl {{.*}}doSomething
;
; With shrink-wrapping, epilogue is just after the call.
; Restore the link register and return.
-; Note that there could be other epilog code before the link register is
+; Note that there could be other epilog code before the link register is
; restored but we will not check for it here.
; ENABLE: mtlr
; ENABLE-NEXT: blr
declare i32 @doSomething(i32, i32*)
-
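;
; For orientation, here is a minimal sketch of the diamond shape the checks
; above describe. It is illustrative only: the test's actual @foo body is
; elided from this excerpt, and the @foo_sketch name is made up for the
; example.
define i32 @foo_sketch(i32 %a, i32 %b) {
entry:
  %tmp = alloca i32, align 4
  %cmp = icmp slt i32 %a, %b
  br i1 %cmp, label %true, label %end

true:
  ; The call sits on only one side of the diamond, so shrink-wrapping can
  ; sink the prologue (link-register save) past the call-free early return.
  store i32 %a, i32* %tmp, align 4
  %call = call i32 @doSomething(i32 0, i32* %tmp)
  br label %end

end:
  %retval = phi i32 [ %call, %true ], [ %a, %entry ]
  ret i32 %retval
}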
; Check that we do not perform the restore inside the loop whereas the save
; is outside.
-; CHECK-LABEL: freqSaveAndRestoreOutsideLoop:
+; CHECK-LABEL: {{.*}}freqSaveAndRestoreOutsideLoop:
;
; Shrink-wrapping allows to skip the prologue in the else case.
; ENABLE: cmplwi 3, 0
-; ENABLE: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; ENABLE: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
;
; Prologue code.
; Make sure we save the link register
; CHECK: mflr {{[0-9]+}}
;
; DISABLE: cmplwi 3, 0
-; DISABLE: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; DISABLE: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
;
; Loop preheader
; CHECK-DAG: li [[SUM:[0-9]+]], 0
; CHECK-DAG: li [[IV:[0-9]+]], 10
-;
+;
; Loop body
-; CHECK: .[[LOOP:LBB[0-9_]+]]: # %for.body
-; CHECK: bl something
+; CHECK: {{.*}}[[LOOP:BB[0-9_]+]]: # %for.body
+; CHECK: bl {{.*}}something
+;
; CHECK-DAG: addi [[IV]], [[IV]], -1
-; CHECK-DAG: add [[SUM]], 3, [[SUM]]
-; CHECK-NEXT: cmplwi [[IV]], 0
-; CHECK-NEXT: bne 0, .[[LOOP]]
+; CHECK-DAG: add [[SUM]], 3, [[SUM]]
+; CHECK-DAG: cmplwi [[IV]], 0
+; CHECK-NEXT: bne 0, {{.*}}[[LOOP]]
;
; Next BB.
; CHECK: slwi 3, [[SUM]], 3
;
; Jump to epilogue.
-; DISABLE: b .[[EPILOG_BB:LBB[0-9_]+]]
+; DISABLE: b {{.*}}[[EPILOG_BB:BB[0-9_]+]]
;
-; DISABLE: .[[ELSE_LABEL]]: # %if.else
+; DISABLE: {{.*}}[[ELSE_LABEL]]: # %if.else
; Shift the second argument left by one and store it in the return register.
; DISABLE: slwi 3, 4, 1
-; DISABLE: .[[EPILOG_BB]]: # %if.end
+; DISABLE: {{.*}}[[EPILOG_BB]]: # %if.end
;
; Epilogue code.
; CHECK: mtlr {{[0-9]+}}
; CHECK: blr
;
-; ENABLE: .[[ELSE_LABEL]]: # %if.else
+; ENABLE: {{.*}}[[ELSE_LABEL]]: # %if.else
; Shift the second argument left by one and store it in the return register.
; ENABLE: slwi 3, 4, 1
; ENABLE-NEXT: blr
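;
; A minimal sketch of the shape these checks target, illustrative only: the
; test's actual @freqSaveAndRestoreOutsideLoop body is elided from this
; excerpt, and the @something_sketch and *_sketch names are made up. The
; point is a call-free fast path guarding a loop that contains a call, so
; the save/restore should stay outside the loop.
declare i32 @something_sketch(i32)

define i32 @freqSaveAndRestoreOutsideLoop_sketch(i32 %cond, i32 %N) {
entry:
  %tobool = icmp eq i32 %cond, 0
  br i1 %tobool, label %if.else, label %for.body

for.body:                                         ; call inside the loop
  %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
  %call = call i32 @something_sketch(i32 %i)
  %add = add nsw i32 %call, %sum
  %inc = add nuw nsw i32 %i, 1
  %exitcond = icmp eq i32 %inc, 10
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  %shl = shl i32 %add, 3
  br label %if.end

if.else:                                          ; call-free path, no prologue needed
  %mul = shl nsw i32 %N, 1
  br label %if.end

if.end:
  %res = phi i32 [ %shl, %for.end ], [ %mul, %if.else ]
  ret i32 %res
}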
; Check that we do not perform shrink-wrapping inside the loop even
; though that would be legal. The cost model must prevent that.
-; CHECK-LABEL: freqSaveAndRestoreOutsideLoop2:
+; CHECK-LABEL: {{.*}}freqSaveAndRestoreOutsideLoop2:
; Prologue code.
; Make sure we save the link register before the call
; CHECK: mflr {{[0-9]+}}
; Loop preheader
; CHECK-DAG: li [[SUM:[0-9]+]], 0
; CHECK-DAG: li [[IV:[0-9]+]], 10
-;
+;
; Loop body
-; CHECK: .[[LOOP:LBB[0-9_]+]]: # %for.body
-; CHECK: bl something
+; CHECK: {{.*}}[[LOOP:BB[0-9_]+]]: # %for.body
+; CHECK: bl {{.*}}something
+;
; CHECK-DAG: addi [[IV]], [[IV]], -1
-; CHECK-DAG: add [[SUM]], 3, [[SUM]]
-; CHECK-NEXT: cmplwi [[IV]], 0
-; CHECK-NEXT: bne 0, .[[LOOP]]
+; CHECK-DAG: add [[SUM]], 3, [[SUM]]
+; CHECK-DAG: cmplwi [[IV]], 0
+;
+; CHECK-NEXT: bne 0, {{.*}}[[LOOP]]
;
; Next BB
; CHECK: %for.exit
; Check, with a more complex case, that we do not place the save inside the
; loop and the restore outside it.
-; CHECK-LABEL: loopInfoSaveOutsideLoop:
+; CHECK-LABEL: {{.*}}loopInfoSaveOutsideLoop:
;
; ENABLE: cmplwi 3, 0
-; ENABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; ENABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
;
; Prologue code.
-; Make sure we save the link register
+; Make sure we save the link register
; CHECK: mflr {{[0-9]+}}
;
-; DISABLE: std
-; DISABLE-NEXT: std
-; DISABLE: cmplwi 3, 0
-; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; DISABLE-64-DAG: std {{[0-9]+}}
+; DISABLE-64-DAG: std {{[0-9]+}}
+; DISABLE-64-DAG: std {{[0-9]+}}
+; DISABLE-64-DAG: stdu 1,
+; DISABLE-64-DAG: cmplwi 3, 0
+;
+; DISABLE-32-DAG: stw {{[0-9]+}}
+; DISABLE-32-DAG: stw {{[0-9]+}}
+; DISABLE-32-DAG: stw {{[0-9]+}}
+; DISABLE-32-DAG: stwu 1,
+; DISABLE-32-DAG: cmplwi 3, 0
+;
+; DISABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
;
; Loop preheader
; CHECK-DAG: li [[SUM:[0-9]+]], 0
; CHECK-DAG: li [[IV:[0-9]+]], 10
-;
+;
; Loop body
-; CHECK: .[[LOOP:LBB[0-9_]+]]: # %for.body
-; CHECK: bl something
+; CHECK: {{.*}}[[LOOP:BB[0-9_]+]]: # %for.body
+; CHECK: bl {{.*}}something
+;
; CHECK-DAG: addi [[IV]], [[IV]], -1
-; CHECK-DAG: add [[SUM]], 3, [[SUM]]
-; CHECK-NEXT: cmplwi [[IV]], 0
-; CHECK-NEXT: bne 0, .[[LOOP]]
-;
+; CHECK-DAG: add [[SUM]], 3, [[SUM]]
+; CHECK-DAG: cmplwi [[IV]], 0
+;
+; CHECK-NEXT: bne 0, {{.*}}[[LOOP]]
+;
; Next BB
-; CHECK: bl somethingElse
+; CHECK: bl {{.*}}somethingElse
; CHECK: slwi 3, [[SUM]], 3
;
; Jump to epilogue
-; DISABLE: b .[[EPILOG_BB:LBB[0-9_]+]]
+; DISABLE: b {{.*}}[[EPILOG_BB:BB[0-9_]+]]
;
-; DISABLE: .[[ELSE_LABEL]]: # %if.else
+; DISABLE: {{.*}}[[ELSE_LABEL]]: # %if.else
; Shift the second argument left by one and store it in the return register.
; DISABLE: slwi 3, 4, 1
;
-; DISABLE: .[[EPILOG_BB]]: # %if.end
+; DISABLE: {{.*}}[[EPILOG_BB]]: # %if.end
+;
; Epilog code
; CHECK: mtlr {{[0-9]+}}
; CHECK: blr
-;
-; ENABLE: .[[ELSE_LABEL]]: # %if.else
+;
+; ENABLE: {{.*}}[[ELSE_LABEL]]: # %if.else
; Shift the second argument left by one and store it in the return register.
; ENABLE: slwi 3, 4, 1
; ENABLE-NEXT: blr
; Check, with a more complex case, that we do not place the restore inside the
; loop and the save outside it.
-; CHECK-LABEL: loopInfoRestoreOutsideLoop:
+; CHECK-LABEL: {{.*}}loopInfoRestoreOutsideLoop:
;
; ENABLE: cmplwi 3, 0
-; ENABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; ENABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
;
; Prologue code.
; Make sure we save the link register
; CHECK: mflr {{[0-9]+}}
;
-; DISABLE: std
-; DISABLE-NEXT: std
-; DISABLE: cmplwi 3, 0
-; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; DISABLE-64-DAG: std {{[0-9]+}}
+; DISABLE-64-DAG: std {{[0-9]+}}
+; DISABLE-64-DAG: std {{[0-9]+}}
+; DISABLE-64-DAG: stdu 1,
+; DISABLE-64-DAG: cmplwi 3, 0
+;
+; DISABLE-32-DAG: stw {{[0-9]+}}
+; DISABLE-32-DAG: stw {{[0-9]+}}
+; DISABLE-32-DAG: stw {{[0-9]+}}
+; DISABLE-32-DAG: stwu 1,
+; DISABLE-32-DAG: cmplwi 3, 0
+;
+; DISABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
;
-; CHECK: bl somethingElse
+; CHECK: bl {{.*}}somethingElse
;
; Loop preheader
; CHECK-DAG: li [[SUM:[0-9]+]], 0
; CHECK-DAG: li [[IV:[0-9]+]], 10
-;
+;
; Loop body
-; CHECK: .[[LOOP:LBB[0-9_]+]]: # %for.body
-; CHECK: bl something
+; CHECK: {{.*}}[[LOOP:BB[0-9_]+]]: # %for.body
+; CHECK: bl {{.*}}something
+;
; CHECK-DAG: addi [[IV]], [[IV]], -1
-; CHECK-DAG: add [[SUM]], 3, [[SUM]]
-; CHECK-NEXT: cmplwi [[IV]], 0
-; CHECK-NEXT: bne 0, .[[LOOP]]
+; CHECK-DAG: add [[SUM]], 3, [[SUM]]
+; CHECK-DAG: cmplwi [[IV]], 0
;
-; Next BB.
-; slwi 3, [[SUM]], 3
+; CHECK-NEXT: bne 0, {{.*}}[[LOOP]]
;
-; DISABLE: b .[[EPILOG_BB:LBB[0-9_]+]]
+; Next BB.
+; CHECK: slwi 3, [[SUM]], 3
;
-; DISABLE: .[[ELSE_LABEL]]: # %if.else
+; DISABLE: b {{.*}}[[EPILOG_BB:BB[0-9_]+]]
+;
+; DISABLE: {{.*}}[[ELSE_LABEL]]: # %if.else
; Shift the second argument left by one and store it in the return register.
; DISABLE: slwi 3, 4, 1
-; DISABLE: .[[EPILOG_BB]]: # %if.end
+; DISABLE: {{.*}}[[EPILOG_BB]]: # %if.end
;
; Epilogue code.
; CHECK: mtlr {{[0-9]+}}
; CHECK: blr
;
-; ENABLE: .[[ELSE_LABEL]]: # %if.else
+; ENABLE: {{.*}}[[ELSE_LABEL]]: # %if.else
; Shift the second argument left by one and store it in the return register.
; ENABLE: slwi 3, 4, 1
; ENABLE-NEXT: blr
}
; Check that we handle a function with no frame information correctly.
-; CHECK-LABEL: emptyFrame:
+; CHECK-LABEL: {{.*}}emptyFrame:
; CHECK: # %entry
; CHECK-NEXT: li 3, 0
; CHECK-NEXT: blr
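;
; Illustrative only (the test's actual @emptyFrame body is elided from this
; excerpt, and the @emptyFrame_sketch name is made up): the point is simply a
; leaf function that needs no frame at all, e.g.
define i32 @emptyFrame_sketch() {
entry:
  ret i32 0
}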
; Check that we handle inline asm correctly.
-; CHECK-LABEL: inlineAsm:
+; CHECK-LABEL: {{.*}}inlineAsm:
;
; ENABLE: cmplwi 3, 0
-; ENABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; ENABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
;
; Prologue code.
; Make sure we save the CSR used in the inline asm: r14
; ENABLE-DAG: li [[IV:[0-9]+]], 10
-; ENABLE-DAG: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
+; ENABLE-64-DAG: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
+; ENABLE-32-DAG: stw 14, -[[STACK_OFFSET:[0-9]+]](1) # 4-byte Folded Spill
;
; DISABLE: cmplwi 3, 0
-; DISABLE-NEXT: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
-; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; DISABLE-64-NEXT: std 14, -[[STACK_OFFSET:[0-9]+]](1) # 8-byte Folded Spill
+; DISABLE-32-NEXT: stw 14, -[[STACK_OFFSET:[0-9]+]](1) # 4-byte Folded Spill
+; DISABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
; DISABLE: li [[IV:[0-9]+]], 10
;
; CHECK: nop
; CHECK: mtctr [[IV]]
;
-; CHECK: .[[LOOP_LABEL:LBB[0-9_]+]]: # %for.body
+; CHECK: {{.*}}[[LOOP_LABEL:BB[0-9_]+]]: # %for.body
; Inline asm statement.
; CHECK: addi 14, 14, 1
-; CHECK: bdnz .[[LOOP_LABEL]]
+; CHECK: bdnz {{.*}}[[LOOP_LABEL]]
;
; Epilogue code.
; CHECK: li 3, 0
-; CHECK-DAG: ld 14, -[[STACK_OFFSET]](1) # 8-byte Folded Reload
-; CHECK: nop
+; CHECK-64-DAG: ld 14, -[[STACK_OFFSET]](1) # 8-byte Folded Reload
+; CHECK-32-DAG: lwz 14, -[[STACK_OFFSET]](1) # 4-byte Folded Reload
+; CHECK-DAG: nop
; CHECK: blr
;
; CHECK: [[ELSE_LABEL]]
; CHECK-NEXT: slwi 3, 4, 1
-; DISABLE: ld 14, -[[STACK_OFFSET]](1) # 8-byte Folded Reload
+; DISABLE-64-NEXT: ld 14, -[[STACK_OFFSET]](1) # 8-byte Folded Reload
+; DISABLE-32-NEXT: lwz 14, -[[STACK_OFFSET]](1) # 4-byte Folded Reload
; CHECK-NEXT: blr
-;
define i32 @inlineAsm(i32 %cond, i32 %N) {
entry:
%tobool = icmp eq i32 %cond, 0
; Check that we handle calls to variadic functions correctly.
-; CHECK-LABEL: callVariadicFunc:
+; CHECK-LABEL: {{.*}}callVariadicFunc:
;
; ENABLE: cmplwi 3, 0
-; ENABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; ENABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
;
; Prologue code.
; CHECK: mflr {{[0-9]+}}
-;
+;
; DISABLE: cmplwi 3, 0
-; DISABLE-NEXT: beq 0, .[[ELSE_LABEL:LBB[0-9_]+]]
+; DISABLE-NEXT: beq 0, {{.*}}[[ELSE_LABEL:BB[0-9_]+]]
;
; Setup of the varargs.
-; CHECK: mr 4, 3
-; CHECK-NEXT: mr 5, 3
-; CHECK-NEXT: mr 6, 3
-; CHECK-NEXT: mr 7, 3
-; CHECK-NEXT: mr 8, 3
-; CHECK-NEXT: mr 9, 3
-; CHECK-NEXT: bl someVariadicFunc
+; CHECK-64: mr 4, 3
+; CHECK-64-NEXT: mr 5, 3
+; CHECK-64-NEXT: mr 6, 3
+; CHECK-64-NEXT: mr 7, 3
+; CHECK-64-NEXT: mr 8, 3
+; CHECK-64-NEXT: mr 9, 3
+;
+; CHECK-32: mr 3, 4
+; CHECK-32-NEXT: mr 5, 4
+; CHECK-32-NEXT: mr 6, 4
+; CHECK-32-NEXT: mr 7, 4
+; CHECK-32-NEXT: mr 8, 4
+; CHECK-32-NEXT: mr 9, 4
+;
+; CHECK-NEXT: bl {{.*}}someVariadicFunc
; CHECK: slwi 3, 3, 3
-; DISABLE: b .[[EPILOGUE_BB:LBB[0-9_]+]]
+; DISABLE: b {{.*}}[[EPILOGUE_BB:BB[0-9_]+]]
;
; ENABLE: mtlr {{[0-9]+}}
; ENABLE-NEXT: blr
;
-; CHECK: .[[ELSE_LABEL]]: # %if.else
+; CHECK: {{.*}}[[ELSE_LABEL]]: # %if.else
; CHECK-NEXT: slwi 3, 4, 1
-;
-; DISABLE: .[[EPILOGUE_BB]]: # %if.end
+;
+; DISABLE: {{.*}}[[EPILOGUE_BB]]: # %if.end
; DISABLE: mtlr
; CHECK: blr
define i32 @callVariadicFunc(i32 %cond, i32 %N) {
; Although it is not incorrect to insert such code, it is useless
; and it hurts the binary size.
;
-; CHECK-LABEL: noreturn:
+; CHECK-LABEL: {{.*}}noreturn:
; DISABLE: mflr {{[0-9]+}}
;
; CHECK: cmplwi 3, 0
-; CHECK-NEXT: bne{{[-]?}} 0, .[[ABORT:LBB[0-9_]+]]
+; CHECK-NEXT: bne{{[-]?}} 0, {{.*}}[[ABORT:BB[0-9_]+]]
;
; CHECK: li 3, 42
;
;
; CHECK-NEXT: blr
;
-; CHECK: .[[ABORT]]: # %if.abort
-;
+; CHECK: {{.*}}[[ABORT]]: # %if.abort
; ENABLE: mflr {{[0-9]+}}
-;
-; CHECK: bl abort
+; CHECK: bl {{.*}}abort
; ENABLE-NOT: mtlr {{[0-9]+}}
define i32 @noreturn(i8 signext %bad_thing) {
entry:
; dominator is itself. In this case, we cannot perform shrink wrapping, but we
; should return gracefully and continue compilation.
; The only requirement for this test is that compilation finishes correctly.
-;
-; CHECK-LABEL: infiniteloop
+;
+; CHECK-LABEL: {{.*}}infiniteloop
; CHECK: blr
define void @infiniteloop() {
entry:
}
; Another infinite loop test, this time with a body bigger than just one block.
-; CHECK-LABEL: infiniteloop2
+; CHECK-LABEL: {{.*}}infiniteloop2
; CHECK: blr
define void @infiniteloop2() {
entry:
}
; Another infinite loop test, this time with two nested infinite loops.
-; CHECK-LABEL: infiniteloop3
-; CHECK: Lfunc_begin[[FUNCNUM:[0-9]+]]
+; CHECK-LABEL: {{.*}}infiniteloop3
; CHECK: bclr
-; CHECK: Lfunc_end[[FUNCNUM]]
define void @infiniteloop3() {
entry:
br i1 undef, label %loop2a, label %body
; Test for a bug that was triggered when the save point was equal to the restore point.
; Function Attrs: nounwind
-; CHECK-LABEL: transpose
+; CHECK-LABEL: {{.*}}transpose
;
; Store of callee-save register saved by shrink wrapping
; FIXME: Test disabled: Improved scheduling no longer needs any spills/reloads!
# RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple powerpc64le-unknown-linux-gnu \
# RUN: -run-pass=shrink-wrap -o - %s | FileCheck %s
+# RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple powerpc-ibm-aix-xcoff \
+# RUN: -run-pass=shrink-wrap -mattr=-altivec -o - %s | FileCheck %s
+# RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple powerpc64-ibm-aix-xcoff \
+# RUN: -run-pass=shrink-wrap -mattr=-altivec -o - %s | FileCheck %s
--- |
; ModuleID = 'test.ll'
source_filename = "test.ll"
target datalayout = "e-m:e-i64:64-n32:64"
-
+
define signext i32 @shrinkwrapme(i32 signext %a, i32 signext %lim) {
entry:
%cmp5 = icmp sgt i32 %lim, 0
br i1 %cmp5, label %for.body.preheader, label %for.cond.cleanup
-
+
for.body.preheader: ; preds = %entry
%0 = add i32 %lim, -1
%1 = zext i32 %0 to i64
%2 = add nuw nsw i64 %1, 1
call void @llvm.set.loop.iterations.i64(i64 %2)
br label %for.body
-
+
for.cond.cleanup: ; preds = %for.body, %entry
%Ret.0.lcssa = phi i32 [ 0, %entry ], [ %3, %for.body ]
ret i32 %Ret.0.lcssa
-
+
for.body: ; preds = %for.body, %for.body.preheader
%Ret.06 = phi i32 [ %3, %for.body ], [ 0, %for.body.preheader ]
%3 = tail call i32 asm "add $0, $1, $2", "=r,r,r,~{r14},~{r15},~{r16},~{r17},~{r18},~{r19},~{r20},~{r21},~{r22},~{r23},~{r24},~{r25},~{r26},~{r27},~{r28},~{r29},~{r30},~{r31}"(i32 %a, i32 %Ret.06)
%4 = call i1 @llvm.loop.decrement.i64(i64 1)
br i1 %4, label %for.body, label %for.cond.cleanup
}
-
+
; Function Attrs: noduplicate nounwind
declare void @llvm.set.loop.iterations.i64(i64) #0
-
+
; Function Attrs: noduplicate nounwind
declare i1 @llvm.loop.decrement.i64(i64) #0
-
+
; Function Attrs: nounwind
declare void @llvm.stackprotector(i8*, i8**) #1
-
+
attributes #0 = { noduplicate nounwind }
attributes #1 = { nounwind }
bb.0.entry:
successors: %bb.2(0x50000000), %bb.1(0x30000000)
liveins: $x3, $x4
-
+
renamable $cr0 = CMPWI renamable $r4, 1
BCC 4, killed renamable $cr0, %bb.2
-
+
bb.1:
successors: %bb.3(0x80000000)
-
+
renamable $r4 = LI 0
B %bb.3
-
+
bb.2.for.body.preheader:
successors: %bb.4(0x80000000)
liveins: $x3, $x4
-
+
renamable $r4 = ADDI renamable $r4, -1, implicit killed $x4, implicit-def $x4
renamable $x4 = RLDICL killed renamable $x4, 0, 32
renamable $x4 = nuw nsw ADDI8 killed renamable $x4, 1
MTCTR8loop killed renamable $x4, implicit-def dead $ctr8
renamable $r4 = LI 0
B %bb.4
-
+
bb.3.for.cond.cleanup:
liveins: $r4
-
+
renamable $x3 = EXTSW_32_64 killed renamable $r4
BLR8 implicit $lr8, implicit $rm, implicit $x3
-
+
bb.4.for.body:
successors: %bb.4(0x7c000000), %bb.3(0x04000000)
liveins: $r4, $x3
-
+
INLINEASM &"add $0, $1, $2", 0, 131082, def renamable $r4, 131081, renamable $r3, 131081, killed renamable $r4, 12, implicit-def dead early-clobber $r14, 12, implicit-def dead early-clobber $r15, 12, implicit-def dead early-clobber $r16, 12, implicit-def dead early-clobber $r17, 12, implicit-def dead early-clobber $r18, 12, implicit-def dead early-clobber $r19, 12, implicit-def dead early-clobber $r20, 12, implicit-def dead early-clobber $r21, 12, implicit-def dead early-clobber $r22, 12, implicit-def dead early-clobber $r23, 12, implicit-def dead early-clobber $r24, 12, implicit-def dead early-clobber $r25, 12, implicit-def dead early-clobber $r26, 12, implicit-def dead early-clobber $r27, 12, implicit-def dead early-clobber $r28, 12, implicit-def dead early-clobber $r29, 12, implicit-def dead early-clobber $r30, 12, implicit-def dead early-clobber $r31
BDNZ8 %bb.4, implicit-def dead $ctr8, implicit $ctr8
B %bb.3