NumInstr < SkipThreshold && I != E; ++I) {
-      if (I->isBundle() || !I->isBundled())
+      if (I->isBundle() || !I->isBundled()) {
+        // When a uniform loop is inside non-uniform control flow, the branch
+        // leaving the loop might be an S_CBRANCH_VCCNZ, which is never taken
+        // when EXEC = 0. We should skip the loop lest it becomes infinite.
+        if (I->getOpcode() == AMDGPU::S_CBRANCH_VCCNZ)
+          return true;
+
         if (++NumInstr >= SkipThreshold)
           return true;
+      }
     }
--- /dev/null
+++ b/test/CodeGen/AMDGPU/uniform-loop-inside-nonuniform.ll
+;RUN: llc -march=amdgcn -mcpu=verde < %s | FileCheck %s --check-prefix=CHECK
+
+; Test a simple uniform loop that lives inside non-uniform control flow.
+
+;CHECK-LABEL: {{^}}test1:
+;CHECK: s_cbranch_execz
+;CHECK: %loop_body
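+; The skip branch must come before the loop: a wave whose EXEC mask is
+; zero takes the s_cbranch_execz and jumps over the loop body, instead of
+; entering a loop it could never leave.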
+define void @test1(<8 x i32> inreg %rsrc, <2 x i32> %addr.base, i32 %y, i32 %p) #0 {
+main_body:
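+ ; %p is a regular (non-inreg) argument, so it arrives in a VGPR and this
+ ; branch is divergent: only some lanes of a wave may enter the loop.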
+ %cc = icmp eq i32 %p, 0
+ br i1 %cc, label %out, label %loop_body
+
+loop_body:
+ %counter = phi i32 [ 0, %main_body ], [ %incr, %loop_body ]
+
+ ; Prevent the loop from being optimized out
+ call void asm sideeffect "", "" ()
+
+ %incr = add i32 %counter, 1
+ %lc = icmp sge i32 %incr, 1000
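+ ; The trip count is a constant, so this exit condition is uniform; the
+ ; backend may lower the exit branch to s_cbranch_vccnz, which is never
+ ; taken while EXEC = 0.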
+ br i1 %lc, label %out, label %loop_body
+
+out:
+ ret void
+}
+
+attributes #0 = { "ShaderType"="0" }