From: Roman Lebedev
Date: Tue, 23 Jul 2019 12:42:41 +0000 (+0000)
Subject: [NFC][PhaseOredering][SimplifyCFG] Add more runlines to umul.with.overflow tests
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=87fdcb8749681e39a03eb830ae881cec95150911;p=platform%2Fupstream%2Fllvm.git

[NFC][PhaseOredering][SimplifyCFG] Add more runlines to umul.with.overflow tests

This way it will be more obvious that the problem is both in the cost threshold
and in the hardcoded benefit check, plus it will show how instsimplify cleans
this all up in the end.

llvm-svn: 366800
---

diff --git a/llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll b/llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll
index b3e5f9b..b104ce8 100644
--- a/llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll
+++ b/llvm/test/Transforms/PhaseOrdering/unsigned-multiply-overflow-check.ll
@@ -1,7 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt -simplifycfg -S < %s | FileCheck %s --check-prefixes=ALL,SIMPLIFYCFG
 ; RUN: opt -instcombine -S < %s | FileCheck %s --check-prefixes=ALL,INSTCOMBINE,INSTCOMBINEONLY
-; RUN: opt -instcombine -simplifycfg -S < %s | FileCheck %s --check-prefixes=ALL,INSTCOMBINE,BOTH
+; RUN: opt -instcombine -simplifycfg -S < %s | FileCheck %s --check-prefixes=ALL,INSTCOMBINE,INSTCOMBINESIMPLIFYCFG,INSTCOMBINESIMPLIFYCFGDEFAULT,INSTCOMBINESIMPLIFYCFGONLY
+; RUN: opt -instcombine -simplifycfg -instcombine -S < %s | FileCheck %s --check-prefixes=ALL,INSTCOMBINE,INSTCOMBINESIMPLIFYCFG,INSTCOMBINESIMPLIFYCFGDEFAULT,INSTCOMBINESIMPLIFYCFGINSTCOMBINE
+; RUN: opt -instcombine -simplifycfg -phi-node-folding-threshold=3 -S < %s | FileCheck %s --check-prefixes=ALL,INSTCOMBINE,INSTCOMBINESIMPLIFYCFG,INSTCOMBINESIMPLIFYCFGCOSTLY,INSTCOMBINESIMPLIFYCFGCOSTLYONLY
+; RUN: opt -instcombine -simplifycfg -instcombine -phi-node-folding-threshold=3 -S < %s | FileCheck %s --check-prefixes=ALL,INSTCOMBINE,INSTCOMBINESIMPLIFYCFG,INSTCOMBINESIMPLIFYCFGCOSTLY,INSTCOMBINESIMPLIFYCFGCOSTLYINSTCOMBINE
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-pc-linux-gnu"
diff --git a/llvm/test/Transforms/SimplifyCFG/unsigned-multiplication-will-overflow.ll b/llvm/test/Transforms/SimplifyCFG/unsigned-multiplication-will-overflow.ll
index 0c02fba..ce0c829 100644
--- a/llvm/test/Transforms/SimplifyCFG/unsigned-multiplication-will-overflow.ll
+++ b/llvm/test/Transforms/SimplifyCFG/unsigned-multiplication-will-overflow.ll
@@ -1,23 +1,25 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -simplifycfg -S | FileCheck %s
+; RUN: opt < %s -simplifycfg -S | FileCheck %s --check-prefixes=ALL,DEFAULT,FALLBACK0
+; RUN: opt < %s -simplifycfg -phi-node-folding-threshold=2 -S | FileCheck %s --check-prefixes=ALL,DEFAULT,FALLBACK1
+; RUN: opt < %s -simplifycfg -phi-node-folding-threshold=3 -S | FileCheck %s --check-prefixes=ALL,COSTLY
 
 ; This is checking that the multiplication does overflow, with a leftover
 ; guard against division-by-zero that was needed before InstCombine
 ; produced llvm.umul.with.overflow.
 
 define i1 @will_overflow(i64 %size, i64 %nmemb) {
-; CHECK-LABEL: @will_overflow(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[SIZE:%.*]], 0
-; CHECK-NEXT: br i1 [[CMP]], label [[LAND_END:%.*]], label [[LAND_RHS:%.*]]
-; CHECK: land.rhs:
-; CHECK-NEXT: [[UMUL:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[SIZE]], i64 [[NMEMB:%.*]])
-; CHECK-NEXT: [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
-; CHECK-NEXT: [[UMUL_NOT_OV:%.*]] = xor i1 [[UMUL_OV]], true
-; CHECK-NEXT: br label [[LAND_END]]
-; CHECK: land.end:
-; CHECK-NEXT: [[TMP0:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[UMUL_NOT_OV]], [[LAND_RHS]] ]
-; CHECK-NEXT: ret i1 [[TMP0]]
+; ALL-LABEL: @will_overflow(
+; ALL-NEXT: entry:
+; ALL-NEXT: [[CMP:%.*]] = icmp eq i64 [[SIZE:%.*]], 0
+; ALL-NEXT: br i1 [[CMP]], label [[LAND_END:%.*]], label [[LAND_RHS:%.*]]
+; ALL: land.rhs:
+; ALL-NEXT: [[UMUL:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[SIZE]], i64 [[NMEMB:%.*]])
+; ALL-NEXT: [[UMUL_OV:%.*]] = extractvalue { i64, i1 } [[UMUL]], 1
+; ALL-NEXT: [[UMUL_NOT_OV:%.*]] = xor i1 [[UMUL_OV]], true
+; ALL-NEXT: br label [[LAND_END]]
+; ALL: land.end:
+; ALL-NEXT: [[TMP0:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[UMUL_NOT_OV]], [[LAND_RHS]] ]
+; ALL-NEXT: ret i1 [[TMP0]]
 ;
 entry:
   %cmp = icmp eq i64 %size, 0
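For context (not part of the patch): the hunk above is truncated after the first line of the test body, so below is a minimal sketch of the kind of input IR this SimplifyCFG test exercises, inferred from the CHECK/ALL lines; the value and block names are copied from those check lines, and the actual test file may differ. The zero-size guard and the llvm.umul.with.overflow call sit in separate blocks joined by a phi, and the new RUN lines probe whether SimplifyCFG is willing to speculate the three land.rhs instructions and flatten the branch at a given -phi-node-folding-threshold.

; Hypothetical standalone reconstruction of the test input, assuming the input
; already matches the check lines (i.e. the default-threshold run leaves the
; CFG unchanged). Reproduce the "COSTLY" configuration with, e.g.:
;   opt -simplifycfg -phi-node-folding-threshold=3 -S sketch.ll
declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64)

define i1 @will_overflow(i64 %size, i64 %nmemb) {
entry:
  %cmp = icmp eq i64 %size, 0
  br i1 %cmp, label %land.end, label %land.rhs

land.rhs:                                         ; only reached when %size != 0
  %umul = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %size, i64 %nmemb)
  %umul.ov = extractvalue { i64, i1 } %umul, 1
  %umul.not.ov = xor i1 %umul.ov, true
  br label %land.end

land.end:
  ; true when %size == 0, otherwise true iff the multiplication did not overflow
  %res = phi i1 [ true, %entry ], [ %umul.not.ov, %land.rhs ]
  ret i1 %res
}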