From: Alexey Bataev
Date: Tue, 20 Feb 2018 18:11:50 +0000 (+0000)
Subject: [SLP] Fix tests checks, NFC.
X-Git-Tag: llvmorg-7.0.0-rc1~12424
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=47dfd249f0f5ff94d64d29bac8b4ee50b696012e;p=platform%2Fupstream%2Fllvm.git

[SLP] Fix tests checks, NFC.

llvm-svn: 325605
---
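Note: each updated test below gains the header "; NOTE: Assertions have been
autogenerated by utils/update_test_checks.py". As a minimal sketch of how such
check lines are regenerated (the opt build path is assumed), the script is
typically invoked as:

    python llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
        llvm/test/Transforms/SLPVectorizer/X86/addsub.ll

It reruns the test's RUN line through opt and rewrites the CHECK/CHECK-NEXT
assertions in place.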
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll b/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll
index 7056a14..3ef4bde 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/addsub.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -12,14 +13,22 @@ target triple = "x86_64-unknown-linux-gnu"
 @fa = common global [4 x float] zeroinitializer, align 16
 @fd = common global [4 x float] zeroinitializer, align 16
-; CHECK-LABEL: @addsub
-; CHECK: %5 = add nsw <4 x i32> %3, %4
-; CHECK: %6 = add nsw <4 x i32> %2, %5
-; CHECK: %7 = sub nsw <4 x i32> %2, %5
-; CHECK: %8 = shufflevector <4 x i32> %6, <4 x i32> %7, <4 x i32>
-
 ; Function Attrs: nounwind uwtable
 define void @addsub() #0 {
+; CHECK-LABEL: @addsub(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @b to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @c to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @d to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @e to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = sub nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP7]], <4 x i32>
+; CHECK-NEXT: store <4 x i32> [[TMP8]], <4 x i32>* bitcast ([4 x i32]* @a to <4 x i32>*), align 4
+; CHECK-NEXT: ret void
+;
 entry:
 %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 0), align 4
 %1 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 0), align 4
@@ -56,14 +65,22 @@ entry:
 ret void
 }
-; CHECK-LABEL: @subadd
-; CHECK: %5 = add nsw <4 x i32> %3, %4
-; CHECK: %6 = sub nsw <4 x i32> %2, %5
-; CHECK: %7 = add nsw <4 x i32> %2, %5
-; CHECK: %8 = shufflevector <4 x i32> %6, <4 x i32> %7, <4 x i32>
-
 ; Function Attrs: nounwind uwtable
 define void @subadd() #0 {
+; CHECK-LABEL: @subadd(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @b to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @c to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @d to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @e to <4 x i32>*), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> [[TMP7]], <4 x i32>
+; CHECK-NEXT: store <4 x i32> [[TMP8]], <4 x i32>* bitcast ([4 x i32]* @a to <4 x i32>*), align 4
+; CHECK-NEXT: ret void
+;
 entry:
 %0 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i32 0, i64 0), align 4
 %1 = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @c, i32 0, i64 0), align 4
@@ -100,12 +117,18 @@ entry:
 ret void
 }
-; CHECK-LABEL: @faddfsub
-; CHECK: %2 = fadd <4 x float> %0, %1
-; CHECK: %3 = fsub <4 x float> %0, %1
-; CHECK: %4 = shufflevector <4 x float> %2, <4 x float> %3, <4 x i32>
 ; Function Attrs: nounwind uwtable
 define void @faddfsub() #0 {
+; CHECK-LABEL: @faddfsub(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fb to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fc to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fadd <4 x float> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = fsub <4 x float> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> [[TMP3]], <4 x i32>
+; CHECK-NEXT: store <4 x float> [[TMP4]], <4 x float>* bitcast ([4 x float]* @fa to <4 x float>*), align 4
+; CHECK-NEXT: ret void
+;
 entry:
 %0 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
 %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
@@ -126,12 +149,18 @@ entry:
 ret void
 }
-; CHECK-LABEL: @fsubfadd
-; CHECK: %2 = fsub <4 x float> %0, %1
-; CHECK: %3 = fadd <4 x float> %0, %1
-; CHECK: %4 = shufflevector <4 x float> %2, <4 x float> %3, <4 x i32>
 ; Function Attrs: nounwind uwtable
 define void @fsubfadd() #0 {
+; CHECK-LABEL: @fsubfadd(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fb to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fc to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fsub <4 x float> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = fadd <4 x float> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x float> [[TMP2]], <4 x float> [[TMP3]], <4 x i32>
+; CHECK-NEXT: store <4 x float> [[TMP4]], <4 x float>* bitcast ([4 x float]* @fa to <4 x float>*), align 4
+; CHECK-NEXT: ret void
+;
 entry:
 %0 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
 %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
@@ -152,12 +181,28 @@ entry:
 ret void
 }
-; CHECK-LABEL: @No_faddfsub
-; CHECK-NOT: fadd <4 x float>
-; CHECK-NOT: fsub <4 x float>
-; CHECK-NOT: shufflevector
 ; Function Attrs: nounwind uwtable
 define void @No_faddfsub() #0 {
+; CHECK-LABEL: @No_faddfsub(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
+; CHECK-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[TMP1]]
+; CHECK-NEXT: store float [[ADD]], float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 1), align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 1), align 4
+; CHECK-NEXT: [[ADD1:%.*]] = fadd float [[TMP2]], [[TMP3]]
+; CHECK-NEXT: store float [[ADD1]], float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 1), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 2), align 4
+; CHECK-NEXT: [[ADD2:%.*]] = fadd float [[TMP4]], [[TMP5]]
+; CHECK-NEXT: store float [[ADD2]], float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP6:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 3), align 4
+; CHECK-NEXT: [[TMP7:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 3), align 4
+; CHECK-NEXT: [[SUB:%.*]] = fsub float [[TMP6]], [[TMP7]]
+; CHECK-NEXT: store float [[SUB]], float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 3), align 4
+; CHECK-NEXT: ret void
+;
 entry:
 %0 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
 %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
@@ -184,11 +229,16 @@ entry:
 ; fc[2] = fa[2]+fb[2];
 ; fc[3] = fa[3]-fb[3];
-; CHECK-LABEL: @reorder_alt
-; CHECK: %3 = fadd <4 x float> %1, %2
-; CHECK: %4 = fsub <4 x float> %1, %2
-; CHECK: %5 = shufflevector <4 x float> %3, <4 x float> %4, <4 x i32>
 define void @reorder_alt() #0 {
+; CHECK-LABEL: @reorder_alt(
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fa to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fb to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP3:%.*]] = fadd <4 x float> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = fsub <4 x float> [[TMP1]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x float> [[TMP3]], <4 x float> [[TMP4]], <4 x i32>
+; CHECK-NEXT: store <4 x float> [[TMP5]], <4 x float>* bitcast ([4 x float]* @fc to <4 x float>*), align 4
+; CHECK-NEXT: ret void
+;
 %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
 %2 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
 %3 = fadd float %1, %2
@@ -212,16 +262,22 @@ define void @reorder_alt() #0 {
 ; fc[0] = fa[0]+(fb[0]-fd[0]);
 ; fc[1] = fa[1]-(fb[1]+fd[1]);
 ; fc[2] = fa[2]+(fb[2]-fd[2]);
-; fc[3] = fa[3]-(fd[3]+fb[3]); //swapped fd and fb
+; fc[3] = fa[3]-(fd[3]+fb[3]); //swapped fd and fb
-; CHECK-LABEL: @reorder_alt_subTree
-; CHECK: %4 = fsub <4 x float> %3, %2
-; CHECK: %5 = fadd <4 x float> %3, %2
-; CHECK: %6 = shufflevector <4 x float> %4, <4 x float> %5, <4 x i32>
-; CHECK: %7 = fadd <4 x float> %1, %6
-; CHECK: %8 = fsub <4 x float> %1, %6
-; CHECK: %9 = shufflevector <4 x float> %7, <4 x float> %8, <4 x i32>
 define void @reorder_alt_subTree() #0 {
+; CHECK-LABEL: @reorder_alt_subTree(
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fa to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fd to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load <4 x float>, <4 x float>* bitcast ([4 x float]* @fb to <4 x float>*), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = fsub <4 x float> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP5:%.*]] = fadd <4 x float> [[TMP3]], [[TMP2]]
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <4 x float> [[TMP4]], <4 x float> [[TMP5]], <4 x i32>
+; CHECK-NEXT: [[TMP7:%.*]] = fadd <4 x float> [[TMP1]], [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = fsub <4 x float> [[TMP1]], [[TMP6]]
+; CHECK-NEXT: [[TMP9:%.*]] = shufflevector <4 x float> [[TMP7]], <4 x float> [[TMP8]], <4 x i32>
+; CHECK-NEXT: store <4 x float> [[TMP9]], <4 x float>* bitcast ([4 x float]* @fc to <4 x float>*), align 4
+; CHECK-NEXT: ret void
+;
 %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
 %2 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
 %3 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fd, i32 0, i64 0), align 4
@@ -251,13 +307,28 @@ define void @reorder_alt_subTree() #0 {
 ; Check vectorization of following code for double data type-
 ; c[0] = (a[0]+b[0])-d[0];
-; c[1] = d[1]+(a[1]+b[1]); //swapped d[1] and (a[1]+b[1])
+; c[1] = d[1]+(a[1]+b[1]); //swapped d[1] and (a[1]+b[1])
-; CHECK-LABEL: @reorder_alt_rightsubTree
-; CHECK: fadd <2 x double>
-; CHECK: fsub <2 x double>
-; CHECK: shufflevector <2 x double>
 define void @reorder_alt_rightsubTree(double* nocapture %c, double* noalias nocapture readonly %a, double* noalias nocapture readonly %b, double* noalias nocapture readonly %d) {
+; CHECK-LABEL: @reorder_alt_rightsubTree(
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds double, double* [[D:%.*]], i64 1
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[D]] to <2 x double>*
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 1
+; CHECK-NEXT: [[TMP5:%.*]] = bitcast double* [[A]] to <2 x double>*
+; CHECK-NEXT: [[TMP6:%.*]] = load <2 x double>, <2 x double>* [[TMP5]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, double* [[B:%.*]], i64 1
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast double* [[B]] to <2 x double>*
+; CHECK-NEXT: [[TMP9:%.*]] = load <2 x double>, <2 x double>* [[TMP8]], align 8
+; CHECK-NEXT: [[TMP10:%.*]] = fadd <2 x double> [[TMP6]], [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = fsub <2 x double> [[TMP10]], [[TMP3]]
+; CHECK-NEXT: [[TMP12:%.*]] = fadd <2 x double> [[TMP10]], [[TMP3]]
+; CHECK-NEXT: [[TMP13:%.*]] = shufflevector <2 x double> [[TMP11]], <2 x double> [[TMP12]], <2 x i32>
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds double, double* [[C:%.*]], i64 1
+; CHECK-NEXT: [[TMP15:%.*]] = bitcast double* [[C]] to <2 x double>*
+; CHECK-NEXT: store <2 x double> [[TMP13]], <2 x double>* [[TMP15]], align 8
+; CHECK-NEXT: ret void
+;
 %1 = load double, double* %a
 %2 = load double, double* %b
 %3 = fadd double %1, %2
@@ -283,13 +354,28 @@ define void @reorder_alt_rightsubTree(double* nocapture %c, double* noalias noca
 ; fc[2] = fa[2]+fb[2];
 ; fc[3] = fb[3]-fa[3];
 ; In the above code we can swap the 1st and 2nd operation as fadd is commutative
-; but not 2nd or 4th as fsub is not commutative.
+; but not 2nd or 4th as fsub is not commutative.
-; CHECK-LABEL: @no_vec_shuff_reorder
-; CHECK-NOT: fadd <4 x float>
-; CHECK-NOT: fsub <4 x float>
-; CHECK-NOT: shufflevector
 define void @no_vec_shuff_reorder() #0 {
+; CHECK-LABEL: @no_vec_shuff_reorder(
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
+; CHECK-NEXT: [[TMP3:%.*]] = fadd float [[TMP1]], [[TMP2]]
+; CHECK-NEXT: store float [[TMP3]], float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 0), align 4
+; CHECK-NEXT: [[TMP4:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 1), align 4
+; CHECK-NEXT: [[TMP5:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 1), align 4
+; CHECK-NEXT: [[TMP6:%.*]] = fsub float [[TMP4]], [[TMP5]]
+; CHECK-NEXT: store float [[TMP6]], float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 1), align 4
+; CHECK-NEXT: [[TMP7:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP8:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP9:%.*]] = fadd float [[TMP7]], [[TMP8]]
+; CHECK-NEXT: store float [[TMP9]], float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 2), align 4
+; CHECK-NEXT: [[TMP10:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 3), align 4
+; CHECK-NEXT: [[TMP11:%.*]] = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 3), align 4
+; CHECK-NEXT: [[TMP12:%.*]] = fsub float [[TMP10]], [[TMP11]]
+; CHECK-NEXT: store float [[TMP12]], float* getelementptr inbounds ([4 x float], [4 x float]* @fc, i32 0, i64 3), align 4
+; CHECK-NEXT: ret void
+;
 %1 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fb, i32 0, i64 0), align 4
 %2 = load float, float* getelementptr inbounds ([4 x float], [4 x float]* @fa, i32 0, i64 0), align 4
 %3 = fadd float %1, %2
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll b/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
index be17c5d..ec29f84 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -5,15 +6,37 @@ target triple = "x86_64-apple-macosx10.7.0"
 @.str = private unnamed_addr constant [6 x i8] c"bingo\00", align 1
-;CHECK-LABEL: @reduce_compare(
-;CHECK: load <2 x double>
-;CHECK: fmul <2 x double>
-;CHECK: fmul <2 x double>
-;CHECK: fadd <2 x double>
-;CHECK: extractelement
-;CHECK: extractelement
-;CHECK: ret
 define void @reduce_compare(double* nocapture %A, i32 %n) {
+; CHECK-LABEL: @reduce_compare(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> undef, double [[CONV]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[CONV]], i32 1
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = shl nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x double>, <2 x double>* [[TMP3]], align 8
+; CHECK-NEXT: [[TMP5:%.*]] = fmul <2 x double> [[TMP1]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x double> , [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = fadd <2 x double> , [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x double> [[TMP7]], i32 0
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x double> [[TMP7]], i32 1
+; CHECK-NEXT: [[CMP11:%.*]] = fcmp ogt double [[TMP8]], [[TMP9]]
+; CHECK-NEXT: br i1 [[CMP11]], label [[IF_THEN:%.*]], label [[FOR_INC]]
+; CHECK: if.then:
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0))
+; CHECK-NEXT: br label [[FOR_INC]]
+; CHECK: for.inc:
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 100
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK: for.end:
+; CHECK-NEXT: ret void
+;
 entry:
 %conv = sitofp i32 %n to double
 br label %for.body
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/funclet.ll b/llvm/test/Transforms/SLPVectorizer/X86/funclet.ll
index 11d0bd9..ae24e92 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/funclet.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/funclet.ll
@@ -1,11 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -S -slp-vectorizer < %s | FileCheck %s
 target datalayout = "e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32"
 target triple = "i686-pc-windows-msvc18.0.0"
 define void @test1(double* %a, double* %b, double* %c) #0 personality i32 (...)* @__CxxFrameHandler3 {
+; CHECK-LABEL: @test1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: invoke void @_CxxThrowException(i8* null, i8* null)
+; CHECK-NEXT: to label [[UNREACHABLE:%.*]] unwind label [[CATCH_DISPATCH:%.*]]
+; CHECK: catch.dispatch:
+; CHECK-NEXT: [[TMP0:%.*]] = catchswitch within none [label %catch] unwind to caller
+; CHECK: catch:
+; CHECK-NEXT: [[TMP1:%.*]] = catchpad within [[TMP0]] [i8* null, i32 64, i8* null]
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 1
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[A]] to <2 x double>*
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[B:%.*]], i64 1
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast double* [[B]] to <2 x double>*
+; CHECK-NEXT: [[TMP5:%.*]] = load <2 x double>, <2 x double>* [[TMP4]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x double> [[TMP3]], [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = call <2 x double> @llvm.floor.v2f64(<2 x double> [[TMP6]]) [ "funclet"(token [[TMP1]]) ]
+; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[C:%.*]], i64 1
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast double* [[C]] to <2 x double>*
+; CHECK-NEXT: store <2 x double> [[TMP7]], <2 x double>* [[TMP8]], align 8
+; CHECK-NEXT: catchret from [[TMP1]] to label [[TRY_CONT:%.*]]
+; CHECK: try.cont:
+; CHECK-NEXT: ret void
+; CHECK: unreachable:
+; CHECK-NEXT: unreachable
+;
 entry:
 invoke void @_CxxThrowException(i8* null, i8* null)
-  to label %unreachable unwind label %catch.dispatch
+  to label %unreachable unwind label %catch.dispatch
 catch.dispatch: ; preds = %entry
 %0 = catchswitch within none [label %catch] unwind to caller
@@ -34,10 +60,6 @@ unreachable: ; preds = %entry
 unreachable
 }
-; CHECK-LABEL: define void @test1(
-; CHECK: %[[cpad:.*]] = catchpad within {{.*}} [i8* null, i32 64, i8* null]
-; CHECK: call <2 x double> @llvm.floor.v2f64(<2 x double> {{.*}}) [ "funclet"(token %[[cpad]]) ]
-
 declare x86_stdcallcc void @_CxxThrowException(i8*, i8*)
 declare i32 @__CxxFrameHandler3(...)
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll b/llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
index b0ce074..493e09a 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
@@ -6,12 +7,39 @@ target triple = "x86_64-apple-macosx10.7.0"
 @.str = private unnamed_addr constant [6 x i8] c"bingo\00", align 1
 ; Uses inside the tree must be scheduled after the corresponding tree bundle.
-;CHECK-LABEL: @in_tree_user(
-;CHECK: load <2 x double>
-;CHECK: fadd <2 x double>
-;CHECK: InTreeUser = fadd
-;CHECK: ret
 define void @in_tree_user(double* nocapture %A, i32 %n) {
+; CHECK-LABEL: @in_tree_user(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[N:%.*]] to double
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x double> undef, double [[CONV]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x double> [[TMP0]], double [[CONV]], i32 1
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_INC:%.*]] ]
+; CHECK-NEXT: [[TMP2:%.*]] = shl nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>*
+; CHECK-NEXT: [[TMP4:%.*]] = load <2 x double>, <2 x double>* [[TMP3]], align 8
+; CHECK-NEXT: [[TMP5:%.*]] = fmul <2 x double> [[TMP1]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = fmul <2 x double> , [[TMP5]]
+; CHECK-NEXT: [[TMP7:%.*]] = fadd <2 x double> , [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x double> [[TMP7]], i32 0
+; CHECK-NEXT: [[INTREEUSER:%.*]] = fadd double [[TMP8]], [[TMP8]]
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x double> [[TMP7]], i32 1
+; CHECK-NEXT: [[CMP11:%.*]] = fcmp ogt double [[TMP8]], [[TMP9]]
+; CHECK-NEXT: br i1 [[CMP11]], label [[IF_THEN:%.*]], label [[FOR_INC]]
+; CHECK: if.then:
+; CHECK-NEXT: [[CALL:%.*]] = tail call i32 (i8*, ...) @printf(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @.str, i64 0, i64 0))
+; CHECK-NEXT: br label [[FOR_INC]]
+; CHECK: for.inc:
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 100
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; CHECK: for.end:
+; CHECK-NEXT: store double [[INTREEUSER]], double* [[A]], align 8
+; CHECK-NEXT: ret void
+;
 entry:
 %conv = sitofp i32 %n to double
 br label %for.body
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/return.ll b/llvm/test/Transforms/SLPVectorizer/X86/return.ll
index e2b2637..445dcba 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/return.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/return.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -S | FileCheck %s
 target datalayout = "e-m:e-p:32:32-f64:32:64-f80:32-n8:16:32-S128"
 target triple = "x86_64--linux-gnu"
@@ -12,12 +13,17 @@ target triple = "x86_64--linux-gnu"
 ; return sum;
 ; }
-; CHECK-LABEL: @return1
-; CHECK: %0 = load <2 x double>, <2 x double>*
-; CHECK: %1 = load <2 x double>, <2 x double>*
-; CHECK: %2 = fadd <2 x double>
-
 define double @return1() {
+; CHECK-LABEL: @return1(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = load <2 x double>, <2 x double>* bitcast ([4 x double]* @a to <2 x double>*), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* bitcast ([4 x double]* @b to <2 x double>*), align 8
+; CHECK-NEXT: [[TMP2:%.*]] = fadd <2 x double> [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP4:%.*]] = extractelement <2 x double> [[TMP2]], i32 1
+; CHECK-NEXT: [[ADD2:%.*]] = fadd double [[TMP3]], [[TMP4]]
+; CHECK-NEXT: ret double [[ADD2]]
+;
 entry:
 %a0 = load double, double* getelementptr inbounds ([4 x double], [4 x double]* @a, i32 0, i32 0), align 8
 %b0 = load double, double* getelementptr inbounds ([4 x double], [4 x double]* @b, i32 0, i32 0), align 8
@@ -33,12 +39,22 @@ entry:
 ; return ((x[0] + x[2]) + (x[1] + x[3]));
 ; }
-; CHECK-LABEL: @return2
-; CHECK: %1 = load <2 x double>, <2 x double>*
-; CHECK: %3 = load <2 x double>, <2 x double>* %2
-; CHECK: %4 = fadd <2 x double> %1, %3
-
 define double @return2(double* nocapture readonly %x) {
+; CHECK-LABEL: @return2(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[X:%.*]], i32 2
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[X]], i32 1
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[X]] to <2 x double>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[X]], i32 3
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[ARRAYIDX1]] to <2 x double>*
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0
+; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1
+; CHECK-NEXT: [[ADD5:%.*]] = fadd double [[TMP5]], [[TMP6]]
+; CHECK-NEXT: ret double [[ADD5]]
+;
 entry:
 %x0 = load double, double* %x, align 4
 %arrayidx1 = getelementptr inbounds double, double* %x, i32 2