From d4e81cd9dd897a698d85106d60c2c76ad096f0c1 Mon Sep 17 00:00:00 2001
From: Philip Reames
Date: Thu, 12 Nov 2020 19:08:45 -0800
Subject: [PATCH] [Tests][LoopVect] Exercise basic uniform memory operand logic

---
 .../Transforms/LoopVectorize/X86/uniform_mem_op.ll | 577 +++++++++++++++++++++
 1 file changed, 577 insertions(+)
 create mode 100644 llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll

diff --git a/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll
new file mode 100644
index 0000000..72e42ff
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/X86/uniform_mem_op.ll
@@ -0,0 +1,577 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -force-vector-width=4 -loop-vectorize -mcpu=haswell < %s | FileCheck %s
+
+;; Basic functional tests for uniform loads and stores. These are cases kept
+;; deliberately simple (and unoptimized by other passes) to feed the vectorizer
+;; with particular input IR.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @uniform_load(i32* align(4) %addr) {
+; CHECK-LABEL: @uniform_load(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 12
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ADDR:%.*]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
+; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP0:!llvm.loop !.*]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4097, 4096
+; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 4096
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOPEXIT]], label [[FOR_BODY]], [[LOOP2:!llvm.loop !.*]]
+; CHECK: loopexit:
+; CHECK-NEXT: [[LOAD_LCSSA:%.*]] = phi i32 [ [[LOAD]], [[FOR_BODY]] ], [ [[TMP19]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[LOAD_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ %iv.next, %for.body ], [ 0, %entry ]
+ %load = load i32, i32* %addr
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv, 4096
+ br i1 %exitcond, label %loopexit, label %for.body
+
+loopexit:
+ ret i32 %load
+}
+
+define i32 @uniform_load2(i32* align(4) %addr) {
+; CHECK-LABEL: @uniform_load2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP36:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP37:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP38:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP39:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 12
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[ADDR:%.*]], align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <4 x i32> undef, i32 [[TMP4]], i32 0
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i32> [[TMP8]], i32 [[TMP5]], i32 1
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <4 x i32> [[TMP9]], i32 [[TMP6]], i32 2
+; CHECK-NEXT: [[TMP11:%.*]] = insertelement <4 x i32> [[TMP10]], i32 [[TMP7]], i32 3
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP16:%.*]] = insertelement <4 x i32> undef, i32 [[TMP12]], i32 0
+; CHECK-NEXT: [[TMP17:%.*]] = insertelement <4 x i32> [[TMP16]], i32 [[TMP13]], i32 1
+; CHECK-NEXT: [[TMP18:%.*]] = insertelement <4 x i32> [[TMP17]], i32 [[TMP14]], i32 2
+; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i32> [[TMP18]], i32 [[TMP15]], i32 3
+; CHECK-NEXT: [[TMP20:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP21:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP22:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP23:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> undef, i32 [[TMP20]], i32 0
+; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP21]], i32 1
+; CHECK-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 2
+; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP26]], i32 [[TMP23]], i32 3
+; CHECK-NEXT: [[TMP28:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP29:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP30:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP31:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP32:%.*]] = insertelement <4 x i32> undef, i32 [[TMP28]], i32 0
+; CHECK-NEXT: [[TMP33:%.*]] = insertelement <4 x i32> [[TMP32]], i32 [[TMP29]], i32 1
+; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP33]], i32 [[TMP30]], i32 2
+; CHECK-NEXT: [[TMP35:%.*]] = insertelement <4 x i32> [[TMP34]], i32 [[TMP31]], i32 3
+; CHECK-NEXT: [[TMP36]] = add <4 x i32> [[VEC_PHI]], [[TMP11]]
+; CHECK-NEXT: [[TMP37]] = add <4 x i32> [[VEC_PHI1]], [[TMP19]]
+; CHECK-NEXT: [[TMP38]] = add <4 x i32> [[VEC_PHI2]], [[TMP27]]
+; CHECK-NEXT: [[TMP39]] = add <4 x i32> [[VEC_PHI3]], [[TMP35]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
+; CHECK-NEXT: [[TMP40:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
+; CHECK-NEXT: br i1 [[TMP40]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP4:!llvm.loop !.*]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP37]], [[TMP36]]
+; CHECK-NEXT: [[BIN_RDX4:%.*]] = add <4 x i32> [[TMP38]], [[BIN_RDX]]
+; CHECK-NEXT: [[BIN_RDX5:%.*]] = add <4 x i32> [[TMP39]], [[BIN_RDX4]]
+; CHECK-NEXT: [[TMP41:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX5]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4097, 4096
+; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP41]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ [[ACCUM_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[LOAD]]
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 4096
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOPEXIT]], label [[FOR_BODY]], [[LOOP5:!llvm.loop !.*]]
+; CHECK: loopexit:
+; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[FOR_BODY]] ], [ [[TMP41]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ %iv.next, %for.body ], [ 0, %entry ]
+ %accum = phi i32 [%accum.next, %for.body], [0, %entry]
+ %load = load i32, i32* %addr
+ %accum.next = add i32 %accum, %load
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv, 4096
+ br i1 %exitcond, label %loopexit, label %for.body
+
+loopexit:
+ ret i32 %accum.next
+}
+
+
+define void @uniform_store_uniform_value(i32* align(4) %addr) {
+; CHECK-LABEL: @uniform_store_uniform_value(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 12
+; CHECK-NEXT: store i32 0, i32* [[ADDR:%.*]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
+; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP6:!llvm.loop !.*]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4097, 4096
+; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: store i32 0, i32* [[ADDR]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 4096
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOPEXIT]], label [[FOR_BODY]], [[LOOP7:!llvm.loop !.*]]
+; CHECK: loopexit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ %iv.next, %for.body ], [ 0, %entry ]
+ store i32 0, i32* %addr
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv, 4096
+ br i1 %exitcond, label %loopexit, label %for.body
+
+loopexit:
+ ret void
+}
+
+define void @uniform_store_varying_value(i32* align(4) %addr) {
+; CHECK-LABEL: @uniform_store_varying_value(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND4:%.*]] = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT9:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK-NEXT: [[STEP_ADD1:%.*]] = add <4 x i64> [[STEP_ADD]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK-NEXT: [[STEP_ADD2:%.*]] = add <4 x i64> [[STEP_ADD1]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 5
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 6
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 7
+; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 9
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 10
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 11
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 12
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 13
+; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 14
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 15
+; CHECK-NEXT: [[STEP_ADD5:%.*]] = add <4 x i32> [[VEC_IND4]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[STEP_ADD6:%.*]] = add <4 x i32> [[STEP_ADD5]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[STEP_ADD7:%.*]] = add <4 x i32> [[STEP_ADD6]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i32> [[VEC_IND4]], i32 0
+; CHECK-NEXT: store i32 [[TMP16]], i32* [[ADDR:%.*]], align 4
+; CHECK-NEXT: [[TMP17:%.*]] = extractelement <4 x i32> [[VEC_IND4]], i32 1
+; CHECK-NEXT: store i32 [[TMP17]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i32> [[VEC_IND4]], i32 2
+; CHECK-NEXT: store i32 [[TMP18]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP19:%.*]] = extractelement <4 x i32> [[VEC_IND4]], i32 3
+; CHECK-NEXT: store i32 [[TMP19]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i32> [[STEP_ADD5]], i32 0
+; CHECK-NEXT: store i32 [[TMP20]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i32> [[STEP_ADD5]], i32 1
+; CHECK-NEXT: store i32 [[TMP21]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[STEP_ADD5]], i32 2
+; CHECK-NEXT: store i32 [[TMP22]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i32> [[STEP_ADD5]], i32 3
+; CHECK-NEXT: store i32 [[TMP23]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i32> [[STEP_ADD6]], i32 0
+; CHECK-NEXT: store i32 [[TMP24]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP25:%.*]] = extractelement <4 x i32> [[STEP_ADD6]], i32 1
+; CHECK-NEXT: store i32 [[TMP25]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP26:%.*]] = extractelement <4 x i32> [[STEP_ADD6]], i32 2
+; CHECK-NEXT: store i32 [[TMP26]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP27:%.*]] = extractelement <4 x i32> [[STEP_ADD6]], i32 3
+; CHECK-NEXT: store i32 [[TMP27]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP28:%.*]] = extractelement <4 x i32> [[STEP_ADD7]], i32 0
+; CHECK-NEXT: store i32 [[TMP28]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP29:%.*]] = extractelement <4 x i32> [[STEP_ADD7]], i32 1
+; CHECK-NEXT: store i32 [[TMP29]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP30:%.*]] = extractelement <4 x i32> [[STEP_ADD7]], i32 2
+; CHECK-NEXT: store i32 [[TMP30]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[TMP31:%.*]] = extractelement <4 x i32> [[STEP_ADD7]], i32 3
+; CHECK-NEXT: store i32 [[TMP31]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[STEP_ADD2]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK-NEXT: [[VEC_IND_NEXT9]] = add <4 x i32> [[STEP_ADD7]], <i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
+; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP8:!llvm.loop !.*]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4097, 4096
+; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[IV_I32:%.*]] = trunc i64 [[IV]] to i32
+; CHECK-NEXT: store i32 [[IV_I32]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 4096
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOPEXIT]], label [[FOR_BODY]], [[LOOP9:!llvm.loop !.*]]
+; CHECK: loopexit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ %iv.next, %for.body ], [ 0, %entry ]
+ %iv.i32 = trunc i64 %iv to i32
+ store i32 %iv.i32, i32* %addr
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv, 4096
+ br i1 %exitcond, label %loopexit, label %for.body
+
+loopexit:
+ ret void
+}
+
+define void @uniform_rw(i32* align(4) %addr) {
+; CHECK-LABEL: @uniform_rw(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[ADDR:%.*]], align 4
+; CHECK-NEXT: [[INC:%.*]] = add i32 [[LOAD]], 1
+; CHECK-NEXT: store i32 [[INC]], i32* [[ADDR]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 4096
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOPEXIT:%.*]], label [[FOR_BODY]]
+; CHECK: loopexit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ %iv.next, %for.body ], [ 0, %entry ]
+ %load = load i32, i32* %addr
+ %inc = add i32 %load, 1
+ store i32 %inc, i32* %addr
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv, 4096
+ br i1 %exitcond, label %loopexit, label %for.body
+
+loopexit:
+ ret void
+}
+
+define void @uniform_copy(i32* %A, i32* %B) {
+; CHECK-LABEL: @uniform_copy(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[B1:%.*]] = bitcast i32* [[B:%.*]] to i8*
+; CHECK-NEXT: [[A2:%.*]] = bitcast i32* [[A:%.*]] to i8*
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; CHECK: vector.memcheck:
+; CHECK-NEXT: [[UGLYGEP:%.*]] = getelementptr i8, i8* [[B1]], i64 1
+; CHECK-NEXT: [[UGLYGEP3:%.*]] = getelementptr i8, i8* [[A2]], i64 1
+; CHECK-NEXT: [[BC:%.*]] = bitcast i32* [[B]] to i8*
+; CHECK-NEXT: [[BC4:%.*]] = bitcast i32* [[A]] to i8*
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult i8* [[BC]], [[UGLYGEP3]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult i8* [[BC4]], [[UGLYGEP]]
+; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT: [[MEMCHECK_CONFLICT:%.*]] = and i1 [[FOUND_CONFLICT]], true
+; CHECK-NEXT: br i1 [[MEMCHECK_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 12
+; CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[A]], align 4, !alias.scope !10
+; CHECK-NEXT: store i32 [[TMP4]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP5]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP6]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP7]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP8]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP9]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP10]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP11]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP12]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP13]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP14]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP15]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP16]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP17]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP18]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: store i32 [[TMP19]], i32* [[B]], align 4, !alias.scope !13, !noalias !10
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 16
+; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
+; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP15:!llvm.loop !.*]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4097, 4096
+; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
+; CHECK-NEXT: store i32 [[LOAD]], i32* [[B]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV]], 4096
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[LOOPEXIT]], label [[FOR_BODY]], [[LOOP16:!llvm.loop !.*]]
+; CHECK: loopexit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ %iv.next, %for.body ], [ 0, %entry ]
+ %load = load i32, i32* %A
+ store i32 %load, i32* %B
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv, 4096
+ br i1 %exitcond, label %loopexit, label %for.body
+
+loopexit:
+ ret void
+}
+
+
+declare void @init(i32*)
+
+;; Count the number of bits set in a bit vector -- key point of relevance is
+;; that the byte load is uniform across 8 iterations at a time.
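+;;
+;; As a rough illustration only (a scalar C sketch, not part of the generated
+;; checks), the loop below computes approximately:
+;;
+;;   int count = 0;
+;;   for (unsigned long i = 0; i < 4096; i++)
+;;     count += (test_base[i / 8] >> (i % 8)) & 1;
+;;
+;; Since i / 8 is constant within each group of 8 consecutive iterations,
+;; every such group re-reads the same byte.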
+define i32 @test_count_bits(i8* %test_base) {
+; CHECK-LABEL: @test_count_bits(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ALLOCA:%.*]] = alloca [4096 x i32], align 4
+; CHECK-NEXT: [[BASE:%.*]] = bitcast [4096 x i32]* [[ALLOCA]] to i32*
+; CHECK-NEXT: call void @init(i32* [[BASE]])
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP52:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP53:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[STEP_ADD:%.*]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 5
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 6
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 7
+; CHECK-NEXT: [[TMP8:%.*]] = udiv <4 x i64> [[VEC_IND]], <i64 8, i64 8, i64 8, i64 8>
+; CHECK-NEXT: [[TMP9:%.*]] = udiv <4 x i64> [[STEP_ADD]], <i64 8, i64 8, i64 8, i64 8>
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i64> [[TMP8]], i32 0
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, i8* [[TEST_BASE:%.*]], i64 [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i64> [[TMP8]], i32 1
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, i8* [[TEST_BASE]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <4 x i64> [[TMP8]], i32 2
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, i8* [[TEST_BASE]], i64 [[TMP14]]
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i64> [[TMP8]], i32 3
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, i8* [[TEST_BASE]], i64 [[TMP16]]
+; CHECK-NEXT: [[TMP18:%.*]] = extractelement <4 x i64> [[TMP9]], i32 0
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, i8* [[TEST_BASE]], i64 [[TMP18]]
+; CHECK-NEXT: [[TMP20:%.*]] = extractelement <4 x i64> [[TMP9]], i32 1
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, i8* [[TEST_BASE]], i64 [[TMP20]]
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i64> [[TMP9]], i32 2
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, i8* [[TEST_BASE]], i64 [[TMP22]]
+; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i64> [[TMP9]], i32 3
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, i8* [[TEST_BASE]], i64 [[TMP24]]
+; CHECK-NEXT: [[TMP26:%.*]] = load i8, i8* [[TMP11]], align 1
+; CHECK-NEXT: [[TMP27:%.*]] = load i8, i8* [[TMP13]], align 1
+; CHECK-NEXT: [[TMP28:%.*]] = load i8, i8* [[TMP15]], align 1
+; CHECK-NEXT: [[TMP29:%.*]] = load i8, i8* [[TMP17]], align 1
+; CHECK-NEXT: [[TMP30:%.*]] = insertelement <4 x i8> undef, i8 [[TMP26]], i32 0
+; CHECK-NEXT: [[TMP31:%.*]] = insertelement <4 x i8> [[TMP30]], i8 [[TMP27]], i32 1
+; CHECK-NEXT: [[TMP32:%.*]] = insertelement <4 x i8> [[TMP31]], i8 [[TMP28]], i32 2
+; CHECK-NEXT: [[TMP33:%.*]] = insertelement <4 x i8> [[TMP32]], i8 [[TMP29]], i32 3
+; CHECK-NEXT: [[TMP34:%.*]] = load i8, i8* [[TMP19]], align 1
+; CHECK-NEXT: [[TMP35:%.*]] = load i8, i8* [[TMP21]], align 1
+; CHECK-NEXT: [[TMP36:%.*]] = load i8, i8* [[TMP23]], align 1
+; CHECK-NEXT: [[TMP37:%.*]] = load i8, i8* [[TMP25]], align 1
+; CHECK-NEXT: [[TMP38:%.*]] = insertelement <4 x i8> undef, i8 [[TMP34]], i32 0
+; CHECK-NEXT: [[TMP39:%.*]] = insertelement <4 x i8> [[TMP38]], i8 [[TMP35]], i32 1
+; CHECK-NEXT: [[TMP40:%.*]] = insertelement <4 x i8> [[TMP39]], i8 [[TMP36]], i32 2
+; CHECK-NEXT: [[TMP41:%.*]] = insertelement <4 x i8> [[TMP40]], i8 [[TMP37]], i32 3
+; CHECK-NEXT: [[TMP42:%.*]] = urem <4 x i64> [[VEC_IND]], <i64 8, i64 8, i64 8, i64 8>
+; CHECK-NEXT: [[TMP43:%.*]] = urem <4 x i64> [[STEP_ADD]], <i64 8, i64 8, i64 8, i64 8>
+; CHECK-NEXT: [[TMP44:%.*]] = trunc <4 x i64> [[TMP42]] to <4 x i8>
+; CHECK-NEXT: [[TMP45:%.*]] = trunc <4 x i64> [[TMP43]] to <4 x i8>
+; CHECK-NEXT: [[TMP46:%.*]] = lshr <4 x i8> [[TMP33]], [[TMP44]]
+; CHECK-NEXT: [[TMP47:%.*]] = lshr <4 x i8> [[TMP41]], [[TMP45]]
+; CHECK-NEXT: [[TMP48:%.*]] = and <4 x i8> [[TMP46]], <i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: [[TMP49:%.*]] = and <4 x i8> [[TMP47]], <i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: [[TMP50:%.*]] = zext <4 x i8> [[TMP48]] to <4 x i32>
+; CHECK-NEXT: [[TMP51:%.*]] = zext <4 x i8> [[TMP49]] to <4 x i32>
+; CHECK-NEXT: [[TMP52]] = add <4 x i32> [[VEC_PHI]], [[TMP50]]
+; CHECK-NEXT: [[TMP53]] = add <4 x i32> [[VEC_PHI2]], [[TMP51]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <4 x i64> [[STEP_ADD]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK-NEXT: [[TMP54:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4096
+; CHECK-NEXT: br i1 [[TMP54]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], [[LOOP17:!llvm.loop !.*]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP53]], [[TMP52]]
+; CHECK-NEXT: [[TMP55:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 4096, 4096
+; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOP_EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 4096, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP55]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[ACCUM:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ACCUM_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[BYTE:%.*]] = udiv i64 [[IV]], 8
+; CHECK-NEXT: [[TEST_ADDR:%.*]] = getelementptr inbounds i8, i8* [[TEST_BASE]], i64 [[BYTE]]
+; CHECK-NEXT: [[EARLYCND:%.*]] = load i8, i8* [[TEST_ADDR]], align 1
+; CHECK-NEXT: [[BIT:%.*]] = urem i64 [[IV]], 8
+; CHECK-NEXT: [[BIT_TRUNC:%.*]] = trunc i64 [[BIT]] to i8
+; CHECK-NEXT: [[MASK:%.*]] = lshr i8 [[EARLYCND]], [[BIT_TRUNC]]
+; CHECK-NEXT: [[TEST:%.*]] = and i8 [[MASK]], 1
+; CHECK-NEXT: [[VAL:%.*]] = zext i8 [[TEST]] to i32
+; CHECK-NEXT: [[ACCUM_NEXT]] = add i32 [[ACCUM]], [[VAL]]
+; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 4094
+; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]], [[LOOP18:!llvm.loop !.*]]
+; CHECK: loop_exit:
+; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LOOP]] ], [ [[TMP55]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
+;
+entry:
+ %alloca = alloca [4096 x i32]
+ %base = bitcast [4096 x i32]* %alloca to i32*
+ call void @init(i32* %base)
+ br label %loop
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %accum = phi i32 [ 0, %entry ], [ %accum.next, %loop ]
+ %iv.next = add i64 %iv, 1
+ %byte = udiv i64 %iv, 8
+ %test_addr = getelementptr inbounds i8, i8* %test_base, i64 %byte
+ %earlycnd = load i8, i8* %test_addr
+ %bit = urem i64 %iv, 8
+ %bit.trunc = trunc i64 %bit to i8
+ %mask = lshr i8 %earlycnd, %bit.trunc
+ %test = and i8 %mask, 1
+ %val = zext i8 %test to i32
+ %accum.next = add i32 %accum, %val
+ %exit = icmp ugt i64 %iv, 4094
+ br i1 %exit, label %loop_exit, label %loop
+
+loop_exit:
+ ret i32 %accum.next
+}
--
2.7.4