From: Markus Lavin Date: Fri, 7 Dec 2018 08:23:37 +0000 (+0000) Subject: [PM] Port LoadStoreVectorizer to the new pass manager. X-Git-Tag: llvmorg-8.0.0-rc1~2642 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4dc4ebd606d6bfa9ae4a8d9c7182f01873da96ee;p=platform%2Fupstream%2Fllvm.git [PM] Port LoadStoreVectorizer to the new pass manager. Differential Revision: https://reviews.llvm.org/D54848 llvm-svn: 348570 --- diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h index 7d42952..d535864 100644 --- a/llvm/include/llvm/InitializePasses.h +++ b/llvm/include/llvm/InitializePasses.h @@ -206,7 +206,7 @@ void initializeLiveRangeShrinkPass(PassRegistry&); void initializeLiveRegMatrixPass(PassRegistry&); void initializeLiveStacksPass(PassRegistry&); void initializeLiveVariablesPass(PassRegistry&); -void initializeLoadStoreVectorizerPass(PassRegistry&); +void initializeLoadStoreVectorizerLegacyPassPass(PassRegistry&); void initializeLoaderPassPass(PassRegistry&); void initializeLocalStackSlotPassPass(PassRegistry&); void initializeLocalizerPass(PassRegistry&); diff --git a/llvm/include/llvm/Transforms/Vectorize/LoadStoreVectorizer.h b/llvm/include/llvm/Transforms/Vectorize/LoadStoreVectorizer.h new file mode 100644 index 0000000..6b37d70 --- /dev/null +++ b/llvm/include/llvm/Transforms/Vectorize/LoadStoreVectorizer.h @@ -0,0 +1,27 @@ +//===- LoadStoreVectorizer.cpp - GPU Load & Store Vectorizer --------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_TRANSFORMS_VECTORIZE_LOADSTOREVECTORIZER_H +#define LLVM_TRANSFORMS_VECTORIZE_LOADSTOREVECTORIZER_H + +#include "llvm/IR/PassManager.h" + +namespace llvm { + +class LoadStoreVectorizerPass : public PassInfoMixin<LoadStoreVectorizerPass> { +public: + PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM); +}; + +/// Create a legacy pass manager instance of the LoadStoreVectorizer pass +Pass *createLoadStoreVectorizerPass(); + +} + +#endif /* LLVM_TRANSFORMS_VECTORIZE_LOADSTOREVECTORIZER_H */ diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp index 35ed0ac..460b99c 100644 --- a/llvm/lib/Passes/PassBuilder.cpp +++ b/llvm/lib/Passes/PassBuilder.cpp @@ -157,6 +157,7 @@ #include "llvm/Transforms/Utils/Mem2Reg.h" #include "llvm/Transforms/Utils/NameAnonGlobals.h" #include "llvm/Transforms/Utils/SymbolRewriter.h" +#include "llvm/Transforms/Vectorize/LoadStoreVectorizer.h" #include "llvm/Transforms/Vectorize/LoopVectorize.h" #include "llvm/Transforms/Vectorize/SLPVectorizer.h" diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def index 43b6a16..99bd5d8 100644 --- a/llvm/lib/Passes/PassRegistry.def +++ b/llvm/lib/Passes/PassRegistry.def @@ -178,6 +178,7 @@ FUNCTION_PASS("lower-expect", LowerExpectIntrinsicPass()) FUNCTION_PASS("lower-guard-intrinsic", LowerGuardIntrinsicPass()) FUNCTION_PASS("guard-widening", GuardWideningPass()) FUNCTION_PASS("gvn", GVN()) +FUNCTION_PASS("load-store-vectorizer", LoadStoreVectorizerPass()) FUNCTION_PASS("loop-simplify", LoopSimplifyPass()) FUNCTION_PASS("loop-sink", LoopSinkPass()) FUNCTION_PASS("lowerinvoke", LowerInvokePass()) diff --git a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp index f2344fe..9ff1832 100644 --- a/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp +++ b/llvm/lib/Transforms/Vectorize/LoadStoreVectorizer.cpp @@
-79,6 +79,7 @@ #include "llvm/Support/MathExtras.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Vectorize.h" +#include "llvm/Transforms/Vectorize/LoadStoreVectorizer.h" #include <algorithm> #include <cassert> #include <cstdlib> @@ -205,12 +206,12 @@ private: unsigned Alignment); }; -class LoadStoreVectorizer : public FunctionPass { +class LoadStoreVectorizerLegacyPass : public FunctionPass { public: static char ID; - LoadStoreVectorizer() : FunctionPass(ID) { - initializeLoadStoreVectorizerPass(*PassRegistry::getPassRegistry()); + LoadStoreVectorizerLegacyPass() : FunctionPass(ID) { + initializeLoadStoreVectorizerLegacyPassPass(*PassRegistry::getPassRegistry()); } bool runOnFunction(Function &F) override; @@ -230,30 +231,23 @@ public: } // end anonymous namespace -char LoadStoreVectorizer::ID = 0; +char LoadStoreVectorizerLegacyPass::ID = 0; -INITIALIZE_PASS_BEGIN(LoadStoreVectorizer, DEBUG_TYPE, +INITIALIZE_PASS_BEGIN(LoadStoreVectorizerLegacyPass, DEBUG_TYPE, "Vectorize load and Store instructions", false, false) INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass) INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) -INITIALIZE_PASS_END(LoadStoreVectorizer, DEBUG_TYPE, +INITIALIZE_PASS_END(LoadStoreVectorizerLegacyPass, DEBUG_TYPE, "Vectorize load and store instructions", false, false) Pass *llvm::createLoadStoreVectorizerPass() { - return new LoadStoreVectorizer(); + return new LoadStoreVectorizerLegacyPass(); } -// The real propagateMetadata expects a SmallVector<Value *>, but we deal in -// vectors of Instructions. -static void propagateMetadata(Instruction *I, ArrayRef<Value *> IL) { - SmallVector<Value *, 8> VL(IL.begin(), IL.end()); - propagateMetadata(I, VL); -} - -bool LoadStoreVectorizer::runOnFunction(Function &F) { +bool LoadStoreVectorizerLegacyPass::runOnFunction(Function &F) { // Don't vectorize when the attribute NoImplicitFloat is used.
if (skipFunction(F) || F.hasFnAttribute(Attribute::NoImplicitFloat)) return false; @@ -268,6 +262,30 @@ bool LoadStoreVectorizer::runOnFunction(Function &F) { return V.run(); } +PreservedAnalyses LoadStoreVectorizerPass::run(Function &F, FunctionAnalysisManager &AM) { + // Don't vectorize when the attribute NoImplicitFloat is used. + if (F.hasFnAttribute(Attribute::NoImplicitFloat)) + return PreservedAnalyses::all(); + + AliasAnalysis &AA = AM.getResult<AAManager>(F); + DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F); + ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F); + TargetTransformInfo &TTI = AM.getResult<TargetIRAnalysis>(F); + + Vectorizer V(F, AA, DT, SE, TTI); + bool Changed = V.run(); + PreservedAnalyses PA; + PA.preserveSet<CFGAnalyses>(); + return Changed ? PA : PreservedAnalyses::all(); +} + +// The real propagateMetadata expects a SmallVector<Value *>, but we deal in +// vectors of Instructions. +static void propagateMetadata(Instruction *I, ArrayRef<Value *> IL) { + SmallVector<Value *, 8> VL(IL.begin(), IL.end()); + propagateMetadata(I, VL); +} + // Vectorizer Implementation bool Vectorizer::run() { bool Changed = false; diff --git a/llvm/lib/Transforms/Vectorize/Vectorize.cpp b/llvm/lib/Transforms/Vectorize/Vectorize.cpp index f62a885..559ab19 100644 --- a/llvm/lib/Transforms/Vectorize/Vectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/Vectorize.cpp @@ -27,7 +27,7 @@ using namespace llvm; void llvm::initializeVectorization(PassRegistry &Registry) { initializeLoopVectorizePass(Registry); initializeSLPVectorizerPass(Registry); - initializeLoadStoreVectorizerPass(Registry); + initializeLoadStoreVectorizerLegacyPassPass(Registry); } void LLVMInitializeVectorization(LLVMPassRegistryRef R) { diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll index fb704ef..b0dd5d1 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/adjust-alloca-alignment.ll @@
-1,5 +1,7 @@ ; RUN: opt -S -load-store-vectorizer -mattr=-unaligned-buffer-access,+max-private-element-size-16 < %s | FileCheck -check-prefix=ALIGNED -check-prefix=ALL %s ; RUN: opt -S -load-store-vectorizer -mattr=+unaligned-buffer-access,+unaligned-scratch-access,+max-private-element-size-16 < %s | FileCheck -check-prefix=UNALIGNED -check-prefix=ALL %s +; RUN: opt -S -passes='function(load-store-vectorizer)' -mattr=-unaligned-buffer-access,+max-private-element-size-16 < %s | FileCheck -check-prefix=ALIGNED -check-prefix=ALL %s +; RUN: opt -S -passes='function(load-store-vectorizer)' -mattr=+unaligned-buffer-access,+unaligned-scratch-access,+max-private-element-size-16 < %s | FileCheck -check-prefix=UNALIGNED -check-prefix=ALL %s target triple = "amdgcn--" target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll index a118ff8..cd1c7fdc 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/complex-index.ll @@ -1,4 +1,5 @@ ; RUN: opt -mtriple=amdgcn-amd-amdhsa -basicaa -load-store-vectorizer -S -o - %s | FileCheck %s +; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" @@ -48,4 +49,4 @@ entry: %cstoreval2 = fptrunc double %storeval2 to float store float %cstoreval2, float addrspace(1)* %arrayidx24, align 4 ret void -} \ No newline at end of file +} diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll 
b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll index 317c257..b8e95a6 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/extended-index.ll @@ -1,4 +1,5 @@ ; RUN: opt -mtriple=amdgcn-amd-amdhsa -basicaa -load-store-vectorizer -S -o - %s | FileCheck %s +; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll index 537922e..5bb6289 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/gep-bitcast.ll @@ -1,4 +1,5 @@ ; RUN: opt -S -mtriple=amdgcn--amdhsa -load-store-vectorizer < %s | FileCheck %s +; RUN: opt -S -mtriple=amdgcn--amdhsa -passes='function(load-store-vectorizer)' < %s | FileCheck %s target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll index a741149..35836f8 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/insertion-point.ll @@ -1,4 +1,5 @@ ; RUN: opt -mtriple=amdgcn-amd-amdhsa -basicaa -load-store-vectorizer -S -o - %s | FileCheck %s +; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s target datalayout = 
"e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll index ccca9e6..81ebb71 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/interleaved-mayalias-store.ll @@ -1,4 +1,5 @@ ; RUN: opt -mtriple=amdgcn-amd-amdhsa -basicaa -load-store-vectorizer -S -o - %s | FileCheck %s +; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" diff --git a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll index e055b8a..15c4771 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/invariant-load.ll @@ -1,4 +1,5 @@ ; RUN: opt -mtriple=amdgcn-amd-amdhsa -basicaa -load-store-vectorizer -S -o - %s | FileCheck %s +; RUN: opt -mtriple=amdgcn-amd-amdhsa -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5" diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll index a9b7229..e29f3df 100644 --- 
a/llvm/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/codegenprepare-produced-address-math.ll @@ -1,5 +1,7 @@ ; RUN: opt -codegenprepare -load-store-vectorizer %s -S -o - | FileCheck %s ; RUN: opt -load-store-vectorizer %s -S -o - | FileCheck %s +; RUN: opt -codegenprepare -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' %s -S -o - | FileCheck %s +; RUN: opt -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' %s -S -o - | FileCheck %s target triple = "x86_64--" diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/compare-scev-by-complexity.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/compare-scev-by-complexity.ll index 7f29a73..e2181f6 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/compare-scev-by-complexity.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/compare-scev-by-complexity.ll @@ -1,4 +1,5 @@ ; RUN: opt -load-store-vectorizer %s -S | FileCheck %s +; RUN: opt -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' %s -S | FileCheck %s ; Check that setting wrapping flags after a SCEV node is created ; does not invalidate "sorted by complexity" invariant for diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/correct-order.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/correct-order.ll index fd2ae51f..043d6ea 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/correct-order.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/correct-order.ll @@ -1,4 +1,5 @@ ; RUN: opt -mtriple=x86_64-unknown-linux-gnu -load-store-vectorizer -S -o - %s | FileCheck %s +; RUN: opt -mtriple=x86_64-unknown-linux-gnu -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/load-width.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/load-width.ll index a61b251..ac5f3ea 100644 
--- a/llvm/test/Transforms/LoadStoreVectorizer/X86/load-width.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/load-width.ll @@ -1,5 +1,7 @@ ; RUN: opt -mtriple=x86_64-unknown-linux-gnu -load-store-vectorizer -mcpu haswell -S -o - %s | FileCheck --check-prefix=CHECK-HSW %s ; RUN: opt -mtriple=x86_64-unknown-linux-gnu -load-store-vectorizer -mcpu knl -S -o - %s | FileCheck --check-prefix=CHECK-KNL %s +; RUN: opt -mtriple=x86_64-unknown-linux-gnu -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -mcpu haswell -S -o - %s | FileCheck --check-prefix=CHECK-HSW %s +; RUN: opt -mtriple=x86_64-unknown-linux-gnu -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -mcpu knl -S -o - %s | FileCheck --check-prefix=CHECK-KNL %s define <8 x double> @loadwidth_insert_extract(double* %ptr) { %a = bitcast double* %ptr to <2 x double> * diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/merge-tbaa.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/merge-tbaa.ll index b4493a8..a93e9ac 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/merge-tbaa.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/merge-tbaa.ll @@ -1,5 +1,7 @@ ; RUN: opt -mtriple=x86_64-unknown-linux-gnu -load-store-vectorizer -S < %s | \ ; RUN: FileCheck %s +; RUN: opt -mtriple=x86_64-unknown-linux-gnu -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S < %s | \ +; RUN: FileCheck %s ; ; The GPU Load & Store Vectorizer may merge differently-typed accesses into a ; single instruction. 
This test checks that we merge TBAA tags for such diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/non-byte-size.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/non-byte-size.ll index 1f00f98..7a00738 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/non-byte-size.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/non-byte-size.ll @@ -1,5 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt < %s -load-store-vectorizer -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s +; RUN: opt < %s -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -mtriple=x86_64-unknown-linux-gnu | FileCheck %s %rec = type { i32, i28 } diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll index 12d882a..92d05f7 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order32.ll @@ -1,4 +1,5 @@ ; RUN: opt -mtriple=x86-linux -load-store-vectorizer -S -o - %s | FileCheck %s +; RUN: opt -mtriple=x86-linux -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64" diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll index bf75ecf..3ae0d89 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/preserve-order64.ll @@ -1,4 +1,5 @@ ; RUN: opt -mtriple=x86_64-unknown-linux-gnu -load-store-vectorizer -S -o - %s | FileCheck %s +; RUN: opt -mtriple=x86_64-unknown-linux-gnu -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s target datalayout = 
"e-m:e-i64:64-i128:128-n32:64-S128" diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/subchain-interleaved.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/subchain-interleaved.ll index 915b94a..72b2991 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/subchain-interleaved.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/subchain-interleaved.ll @@ -1,4 +1,5 @@ ; RUN: opt -mtriple=x86_64-unknown-linux-gnu -load-store-vectorizer -S -o - %s | FileCheck %s +; RUN: opt -mtriple=x86_64-unknown-linux-gnu -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -S -o - %s | FileCheck %s target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128" diff --git a/llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll b/llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll index 379b235..00971f3 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/X86/vector-scalar.ll @@ -1,4 +1,5 @@ ; RUN: opt -mtriple=x86_64-unknown-linux-gnu -load-store-vectorizer -mcpu haswell -S -o - %s | FileCheck %s +; RUN: opt -mtriple=x86_64-unknown-linux-gnu -aa-pipeline=basic-aa -passes='function(load-store-vectorizer)' -mcpu haswell -S -o - %s | FileCheck %s ; Check that the LoadStoreVectorizer does not crash due to not differentiating <1 x T> and T. diff --git a/llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll b/llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll index 07bdc91..07487b5 100644 --- a/llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll +++ b/llvm/test/Transforms/LoadStoreVectorizer/int_sideeffect.ll @@ -1,4 +1,5 @@ ; RUN: opt -S < %s -load-store-vectorizer | FileCheck %s +; RUN: opt -S < %s -passes='function(load-store-vectorizer)' | FileCheck %s declare void @llvm.sideeffect()