From 01a057a0c4752940f4ba32b84bf209e85564e424 Mon Sep 17 00:00:00 2001
From: David L Kreitzer
Date: Fri, 14 Oct 2016 18:20:41 +0000
Subject: [PATCH] Add a pass to optimize patterns of vectorized interleaved
 memory accesses for X86.

The pass optimizes as a unit the entire wide load + shuffles pattern
produced by interleaved vectorization. This initial patch optimizes one
pattern (64-bit elements interleaved by a factor of 4). Future patches will
generalize to additional patterns.

Patch by Farhana Aleen

Differential revision: http://reviews.llvm.org/D24681

llvm-svn: 284260
---
 llvm/lib/CodeGen/InterleavedAccessPass.cpp         |   5 +
 llvm/lib/Target/X86/CMakeLists.txt                 |   1 +
 llvm/lib/Target/X86/X86ISelLowering.h              |   8 ++
 llvm/lib/Target/X86/X86InterleavedAccess.cpp       | 117 +++++++++++++++++++
 llvm/lib/Target/X86/X86TargetMachine.cpp           |   3 +
 llvm/test/CodeGen/X86/x86-interleaved-access.ll    | 129 +++++++++++++++++++++
 .../X86/interleaved-accesses-64bits-avx.ll         | 105 +++++++++++++++++
 .../Transforms/InterleavedAccess/X86/lit.local.cfg |   2 +
 8 files changed, 370 insertions(+)
 create mode 100644 llvm/lib/Target/X86/X86InterleavedAccess.cpp
 create mode 100644 llvm/test/CodeGen/X86/x86-interleaved-access.ll
 create mode 100644 llvm/test/Transforms/InterleavedAccess/X86/interleaved-accesses-64bits-avx.ll
 create mode 100644 llvm/test/Transforms/InterleavedAccess/X86/lit.local.cfg

diff --git a/llvm/lib/CodeGen/InterleavedAccessPass.cpp b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
index eec282d..362f617 100644
--- a/llvm/lib/CodeGen/InterleavedAccessPass.cpp
+++ b/llvm/lib/CodeGen/InterleavedAccessPass.cpp
@@ -29,6 +29,9 @@
 // It could be transformed into a ld2 intrinsic in AArch64 backend or a vld2
 // intrinsic in ARM backend.
 //
+// In X86, this can be further optimized into a set of target
+// specific loads followed by an optimized sequence of shuffles.
+//
 // E.g. An interleaved store (Factor = 3):
 //   %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
 //            <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
 //   store <12 x i32> %i.vec, <12 x i32>* %ptr
@@ -37,6 +40,8 @@
 // It could be transformed into a st3 intrinsic in AArch64 backend or a vst3
 // intrinsic in ARM backend.
 //
+// Similarly, a set of interleaved stores can be transformed into an optimized
+// sequence of shuffles followed by a set of target specific stores for X86.
 //===----------------------------------------------------------------------===//

 #include "llvm/CodeGen/Passes.h"
diff --git a/llvm/lib/Target/X86/CMakeLists.txt b/llvm/lib/Target/X86/CMakeLists.txt
index 8679278..bd2d300 100644
--- a/llvm/lib/Target/X86/CMakeLists.txt
+++ b/llvm/lib/Target/X86/CMakeLists.txt
@@ -24,6 +24,7 @@ set(sources
   X86FrameLowering.cpp
   X86ISelDAGToDAG.cpp
   X86ISelLowering.cpp
+  X86InterleavedAccess.cpp
   X86InstrFMA3Info.cpp
   X86InstrInfo.cpp
   X86MCInstLower.cpp
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 3fa2283..e1429b8 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1030,6 +1030,14 @@ namespace llvm {
     bool supportSwiftError() const override;

+    unsigned getMaxSupportedInterleaveFactor() const override { return 4; }
+
+    /// \brief Lower interleaved load(s) into target specific
+    /// instructions/intrinsics.
+    bool lowerInterleavedLoad(LoadInst *LI,
+                              ArrayRef<ShuffleVectorInst *> Shuffles,
+                              ArrayRef<unsigned> Indices,
+                              unsigned Factor) const override;
   protected:
     std::pair<const TargetRegisterClass *, uint8_t>
     findRepresentativeClass(const TargetRegisterInfo *TRI,
diff --git a/llvm/lib/Target/X86/X86InterleavedAccess.cpp b/llvm/lib/Target/X86/X86InterleavedAccess.cpp
new file mode 100644
index 0000000..0759a36
--- /dev/null
+++ b/llvm/lib/Target/X86/X86InterleavedAccess.cpp
@@ -0,0 +1,117 @@
+//===------- X86InterleavedAccess.cpp --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the X86 implementation of the interleaved accesses
+// optimization generating X86-specific instructions/intrinsics for interleaved
+// access groups.
+//
+//===----------------------------------------------------------------------===//
+
+#include "X86ISelLowering.h"
+#include "X86TargetMachine.h"
+
+using namespace llvm;
+
+/// Returns true if the interleaved access group represented by the shuffles
+/// is supported for the subtarget. Returns false otherwise.
+static bool isSupported(const X86Subtarget &SubTarget,
+                        const LoadInst *LI,
+                        const ArrayRef<ShuffleVectorInst *> &Shuffles,
+                        unsigned Factor) {
+
+  const DataLayout &DL = Shuffles[0]->getModule()->getDataLayout();
+  VectorType *ShuffleVecTy = Shuffles[0]->getType();
+  unsigned ShuffleVecSize = DL.getTypeSizeInBits(ShuffleVecTy);
+  Type *ShuffleEltTy = ShuffleVecTy->getVectorElementType();
+
+  if (DL.getTypeSizeInBits(LI->getType()) < Factor * ShuffleVecSize)
+    return false;
+
+  // Currently, lowering is supported for 64 bits on AVX.
+  if (!SubTarget.hasAVX() || ShuffleVecSize != 256 ||
+      DL.getTypeSizeInBits(ShuffleEltTy) != 64 ||
+      Factor != 4)
+    return false;
+
+  return true;
+}
+
+/// \brief Lower interleaved load(s) into target specific instructions/
+/// intrinsics. Lowering sequence varies depending on the vector-types, factor,
+/// number of shuffles and ISA.
+/// Currently, lowering is supported for 4x64 bits with Factor = 4 on AVX.
+bool X86TargetLowering::lowerInterleavedLoad(
+    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
+    ArrayRef<unsigned> Indices, unsigned Factor) const {
+  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
+         "Invalid interleave factor");
+  assert(!Shuffles.empty() && "Empty shufflevector input");
+  assert(Shuffles.size() == Indices.size() &&
+         "Unmatched number of shufflevectors and indices");
+
+  if (!isSupported(Subtarget, LI, Shuffles, Factor))
+    return false;
+
+  VectorType *ShuffleVecTy = Shuffles[0]->getType();
+
+  Type *VecBasePtrTy = ShuffleVecTy->getPointerTo(LI->getPointerAddressSpace());
+
+  IRBuilder<> Builder(LI);
+  SmallVector<Instruction *, 4> NewLoads;
+  SmallVector<Value *, 4> NewShuffles;
+  NewShuffles.resize(Factor);
+
+  Value *VecBasePtr =
+      Builder.CreateBitCast(LI->getPointerOperand(), VecBasePtrTy);
+
+  // Generate 4 loads of type v4xT64
+  for (unsigned Part = 0; Part < Factor; Part++) {
+    // TODO: Support inbounds GEP
+    Value *NewBasePtr =
+        Builder.CreateGEP(VecBasePtr, Builder.getInt32(Part));
+    Instruction *NewLoad =
+        Builder.CreateAlignedLoad(NewBasePtr, LI->getAlignment());
+    NewLoads.push_back(NewLoad);
+  }
+
+  // dst = src1[0,1],src2[0,1]
+  uint32_t IntMask1[] = {0, 1, 4, 5};
+  ArrayRef<uint32_t> ShuffleMask = makeArrayRef(IntMask1, 4);
+  Value *IntrVec1 =
+      Builder.CreateShuffleVector(NewLoads[0], NewLoads[2], ShuffleMask);
+  Value *IntrVec2 =
+      Builder.CreateShuffleVector(NewLoads[1], NewLoads[3], ShuffleMask);
+
+  // dst = src1[2,3],src2[2,3]
+  uint32_t IntMask2[] = {2, 3, 6, 7};
+  ShuffleMask = makeArrayRef(IntMask2, 4);
+  Value *IntrVec3 =
+      Builder.CreateShuffleVector(NewLoads[0], NewLoads[2], ShuffleMask);
+  Value *IntrVec4 =
+      Builder.CreateShuffleVector(NewLoads[1], NewLoads[3], ShuffleMask);
+
+  // dst = src1[0],src2[0],src1[2],src2[2]
+  uint32_t IntMask3[] = {0, 4, 2, 6};
+  ShuffleMask = makeArrayRef(IntMask3, 4);
+  NewShuffles[0] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, ShuffleMask);
+  NewShuffles[2] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, ShuffleMask);
+
+  // dst = src1[1],src2[1],src1[3],src2[3]
+  uint32_t IntMask4[] = {1, 5, 3, 7};
+  ShuffleMask = makeArrayRef(IntMask4, 4);
+  NewShuffles[1] = Builder.CreateShuffleVector(IntrVec1, IntrVec2, ShuffleMask);
+  NewShuffles[3] = Builder.CreateShuffleVector(IntrVec3, IntrVec4, ShuffleMask);
+
+  for (unsigned i = 0; i < Shuffles.size(); i++) {
+    unsigned Index = Indices[i];
+    Shuffles[i]->replaceAllUsesWith(NewShuffles[Index]);
+  }
+
+  return true;
+}
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 5dfa87b..773438d 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -269,6 +269,9 @@ void X86PassConfig::addIRPasses() {
   addPass(createAtomicExpandPass(&getX86TargetMachine()));

   TargetPassConfig::addIRPasses();
+
+  if (TM->getOptLevel() != CodeGenOpt::None)
+    addPass(createInterleavedAccessPass(TM));
 }

 bool X86PassConfig::addInstSelector() {
diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
new file mode 100644
index 0000000..1fc1b43
--- /dev/null
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -0,0 +1,129 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc -mtriple=x86_64-pc-linux -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+
+define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
+; AVX-LABEL: load_factorf64_4:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovupd (%rdi), %ymm0
+; AVX-NEXT:    vmovupd 32(%rdi), %ymm1
+; AVX-NEXT:    vmovupd 64(%rdi), %ymm2
+; AVX-NEXT:    vmovupd 96(%rdi), %ymm3
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX-NEXT:    vhaddpd %ymm5, %ymm4, %ymm1
+; AVX-NEXT:    vaddpd %ymm2, %ymm1, %ymm1
+; AVX-NEXT:    vaddpd %ymm0, %ymm1, %ymm0
+; AVX-NEXT:    retq
+  %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
+  %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %strided.v1 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
+  %strided.v2 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+  %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
+  %add1 = fadd <4 x double> %strided.v0, %strided.v1
+  %add2 = fadd <4 x double> %add1, %strided.v2
+  %add3 = fadd <4 x double> %add2, %strided.v3
+  ret <4 x double> %add3
+}
+
+define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
+; AVX-LABEL: load_factorf64_2:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovupd (%rdi), %ymm0
+; AVX-NEXT:    vmovupd 32(%rdi), %ymm1
+; AVX-NEXT:    vmovupd 64(%rdi), %ymm2
+; AVX-NEXT:    vmovupd 96(%rdi), %ymm3
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX-NEXT:    vmulpd %ymm0, %ymm2, %ymm0
+; AVX-NEXT:    retq
+  %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
+  %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
+  %mul = fmul <4 x double> %strided.v0, %strided.v3
+  ret <4 x double> %mul
+}
+
+define <4 x double> @load_factorf64_1(<16 x double>* %ptr) {
+; AVX-LABEL: load_factorf64_1:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovupd (%rdi), %ymm0
+; AVX-NEXT:    vmovupd 32(%rdi), %ymm1
+; AVX-NEXT:    vmovupd 64(%rdi), %ymm2
+; AVX-NEXT:    vmovupd 96(%rdi), %ymm3
+; AVX-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX-NEXT:    vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT:    vmulpd %ymm0, %ymm0, %ymm0
+; AVX-NEXT:    retq
+  %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
+  %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %mul = fmul <4 x double> %strided.v0, %strided.v3
+  ret <4 x double> %mul
+}
+
+define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
+; AVX1-LABEL: load_factori64_4:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vmovupd (%rdi), %ymm0
+; AVX1-NEXT:    vmovupd 32(%rdi), %ymm1
+; AVX1-NEXT:    vmovupd 64(%rdi), %ymm2
+; AVX1-NEXT:    vmovupd 96(%rdi), %ymm3
+; AVX1-NEXT:    vinsertf128 $1, %xmm2, %ymm0, %ymm4
+; AVX1-NEXT:    vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX1-NEXT:    vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX1-NEXT:    vunpcklpd {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX1-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX1-NEXT:    vextractf128 $1, %ymm4, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT:    vpaddq %xmm3, %xmm4, %xmm4
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm3
+; AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT:    vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT:    vpaddq %xmm1, %xmm5, %xmm1
+; AVX1-NEXT:    vpaddq %xmm0, %xmm4, %xmm0
+; AVX1-NEXT:    vpaddq %xmm0, %xmm2, %xmm0
+; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: load_factori64_4:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
+; AVX2-NEXT:    vmovdqu 32(%rdi), %ymm1
+; AVX2-NEXT:    vmovdqu 64(%rdi), %ymm2
+; AVX2-NEXT:    vmovdqu 96(%rdi), %ymm3
+; AVX2-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm4
+; AVX2-NEXT:    vinserti128 $1, %xmm3, %ymm1, %ymm5
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
+; AVX2-NEXT:    vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} ymm3 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} ymm4 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; AVX2-NEXT:    vpaddq %ymm3, %ymm4, %ymm1
+; AVX2-NEXT:    vpaddq %ymm0, %ymm1, %ymm0
+; AVX2-NEXT:    vpaddq %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    retq
+  %wide.vec = load <16 x i64>, <16 x i64>* %ptr, align 16
+  %strided.v0 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %strided.v1 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
+  %strided.v2 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+  %strided.v3 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
+  %add1 = add <4 x i64> %strided.v0, %strided.v1
+  %add2 = add <4 x i64> %add1, %strided.v2
+  %add3 = add <4 x i64> %add2, %strided.v3
+  ret <4 x i64> %add3
+}
diff --git a/llvm/test/Transforms/InterleavedAccess/X86/interleaved-accesses-64bits-avx.ll b/llvm/test/Transforms/InterleavedAccess/X86/interleaved-accesses-64bits-avx.ll
new file mode 100644
index 0000000..bf2009e
--- /dev/null
+++ b/llvm/test/Transforms/InterleavedAccess/X86/interleaved-accesses-64bits-avx.ll
@@ -0,0 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-pc-linux -mattr=+avx -interleaved-access -S | FileCheck %s
+
+; This file tests the function `llvm::lowerInterleavedLoad`.
+
+define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
+; CHECK-LABEL: @load_factorf64_4(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x double>* %ptr to <4 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr <4 x double>, <4 x double>* [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x double>, <4 x double>* [[TMP2]], align 16
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr <4 x double>, <4 x double>* [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x double>, <4 x double>* [[TMP4]], align 16
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr <4 x double>, <4 x double>* [[TMP1]], i32 2
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x double>, <4 x double>* [[TMP6]], align 16
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr <4 x double>, <4 x double>* [[TMP1]], i32 3
+; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x double>, <4 x double>* [[TMP8]], align 16
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x double> [[TMP3]], <4 x double> [[TMP7]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x double> [[TMP5]], <4 x double> [[TMP9]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x double> [[TMP3]], <4 x double> [[TMP7]], <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <4 x double> [[TMP5]], <4 x double> [[TMP9]], <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <4 x double> [[TMP10]], <4 x double> [[TMP11]], <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+; CHECK-NEXT:    [[TMP15:%.*]] = shufflevector <4 x double> [[TMP12]], <4 x double> [[TMP13]], <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+; CHECK-NEXT:    [[TMP16:%.*]] = shufflevector <4 x double> [[TMP10]], <4 x double> [[TMP11]], <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP17:%.*]] = shufflevector <4 x double> [[TMP12]], <4 x double> [[TMP13]], <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+; CHECK-NEXT:    [[ADD1:%.*]] = fadd <4 x double> [[TMP14]], [[TMP16]]
+; CHECK-NEXT:    [[ADD2:%.*]] = fadd <4 x double> [[ADD1]], [[TMP15]]
+; CHECK-NEXT:    [[ADD3:%.*]] = fadd <4 x double> [[ADD2]], [[TMP17]]
+; CHECK-NEXT:    ret <4 x double> [[ADD3]]
+;
+  %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
+  %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %strided.v1 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
+  %strided.v2 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+  %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
+  %add1 = fadd <4 x double> %strided.v0, %strided.v1
+  %add2 = fadd <4 x double> %add1, %strided.v2
+  %add3 = fadd <4 x double> %add2, %strided.v3
+  ret <4 x double> %add3
+}
+
+define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
+; CHECK-LABEL: @load_factori64_4(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x i64>* %ptr to <4 x i64>*
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr <4 x i64>, <4 x i64>* [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x i64>, <4 x i64>* [[TMP2]], align 16
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr <4 x i64>, <4 x i64>* [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i64>, <4 x i64>* [[TMP4]], align 16
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr <4 x i64>, <4 x i64>* [[TMP1]], i32 2
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i64>, <4 x i64>* [[TMP6]], align 16
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr <4 x i64>, <4 x i64>* [[TMP1]], i32 3
+; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i64>, <4 x i64>* [[TMP8]], align 16
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x i64> [[TMP3]], <4 x i64> [[TMP7]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x i64> [[TMP5]], <4 x i64> [[TMP9]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i64> [[TMP3]], <4 x i64> [[TMP7]], <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <4 x i64> [[TMP5]], <4 x i64> [[TMP9]], <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <4 x i64> [[TMP10]], <4 x i64> [[TMP11]], <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+; CHECK-NEXT:    [[TMP15:%.*]] = shufflevector <4 x i64> [[TMP12]], <4 x i64> [[TMP13]], <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+; CHECK-NEXT:    [[TMP16:%.*]] = shufflevector <4 x i64> [[TMP10]], <4 x i64> [[TMP11]], <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP17:%.*]] = shufflevector <4 x i64> [[TMP12]], <4 x i64> [[TMP13]], <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+; CHECK-NEXT:    [[ADD1:%.*]] = add <4 x i64> [[TMP14]], [[TMP16]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add <4 x i64> [[ADD1]], [[TMP15]]
+; CHECK-NEXT:    [[ADD3:%.*]] = add <4 x i64> [[ADD2]], [[TMP17]]
+; CHECK-NEXT:    ret <4 x i64> [[ADD3]]
+;
+  %wide.vec = load <16 x i64>, <16 x i64>* %ptr, align 16
+  %strided.v0 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %strided.v1 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 1, i32 5, i32 9, i32 13>
+  %strided.v2 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 2, i32 6, i32 10, i32 14>
+  %strided.v3 = shufflevector <16 x i64> %wide.vec, <16 x i64> undef, <4 x i32> <i32 3, i32 7, i32 11, i32 15>
+  %add1 = add <4 x i64> %strided.v0, %strided.v1
+  %add2 = add <4 x i64> %add1, %strided.v2
+  %add3 = add <4 x i64> %add2, %strided.v3
+  ret <4 x i64> %add3
+}
+
+define <4 x double> @load_factorf64_1(<16 x double>* %ptr) {
+; CHECK-LABEL: @load_factorf64_1(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <16 x double>* %ptr to <4 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr <4 x double>, <4 x double>* [[TMP1]], i32 0
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x double>, <4 x double>* [[TMP2]], align 16
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr <4 x double>, <4 x double>* [[TMP1]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x double>, <4 x double>* [[TMP4]], align 16
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr <4 x double>, <4 x double>* [[TMP1]], i32 2
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x double>, <4 x double>* [[TMP6]], align 16
+; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr <4 x double>, <4 x double>* [[TMP1]], i32 3
+; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x double>, <4 x double>* [[TMP8]], align 16
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x double> [[TMP3]], <4 x double> [[TMP7]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <4 x double> [[TMP5]], <4 x double> [[TMP9]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x double> [[TMP3]], <4 x double> [[TMP7]], <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+; CHECK-NEXT:    [[TMP13:%.*]] = shufflevector <4 x double> [[TMP5]], <4 x double> [[TMP9]], <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <4 x double> [[TMP10]], <4 x double> [[TMP11]], <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+; CHECK-NEXT:    [[TMP15:%.*]] = shufflevector <4 x double> [[TMP12]], <4 x double> [[TMP13]], <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+; CHECK-NEXT:    [[TMP16:%.*]] = shufflevector <4 x double> [[TMP10]], <4 x double> [[TMP11]], <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+; CHECK-NEXT:    [[TMP17:%.*]] = shufflevector <4 x double> [[TMP12]], <4 x double> [[TMP13]], <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+; CHECK-NEXT:    [[MUL:%.*]] = fmul <4 x double> [[TMP14]], [[TMP14]]
+; CHECK-NEXT:    ret <4 x double> [[MUL]]
+;
+  %wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
+  %strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
+  %mul = fmul <4 x double> %strided.v0, %strided.v3
+  ret <4 x double> %mul
+}
+
+
diff --git a/llvm/test/Transforms/InterleavedAccess/X86/lit.local.cfg b/llvm/test/Transforms/InterleavedAccess/X86/lit.local.cfg
new file mode 100644
index 0000000..afde89b
--- /dev/null
+++ b/llvm/test/Transforms/InterleavedAccess/X86/lit.local.cfg
@@ -0,0 +1,2 @@
+if not 'X86' in config.root.targets:
+    config.unsupported = True
-- 
2.7.4
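
For reference, the two shuffle stages emitted by lowerInterleavedLoad amount to a 4x4 transpose of 64-bit lanes. The minimal standalone C++ sketch below (not part of the patch, no LLVM APIs, all names illustrative) applies the same masks ({0,1,4,5}, {2,3,6,7}, then {0,4,2,6}, {1,5,3,7}) to symbolic lane indices and checks that the results are exactly the strided vectors the original shufflevectors extract.

#include <array>
#include <cassert>
#include <cstdint>

using Vec4 = std::array<uint64_t, 4>;
using Mask4 = std::array<uint32_t, 4>;

// Two-operand shuffle on 4-lane vectors, same convention as shufflevector:
// mask indices 0-3 select from A, indices 4-7 select from B.
static Vec4 shuffle(const Vec4 &A, const Vec4 &B, const Mask4 &M) {
  Vec4 R{};
  for (unsigned i = 0; i < 4; ++i)
    R[i] = M[i] < 4 ? A[M[i]] : B[M[i] - 4];
  return R;
}

int main() {
  // Loads[i][j] holds lane index 4*i + j of the wide 16-element vector,
  // i.e. the j-th element of the i-th narrow load.
  std::array<Vec4, 4> Loads;
  for (unsigned i = 0; i < 4; ++i)
    for (unsigned j = 0; j < 4; ++j)
      Loads[i][j] = 4 * i + j;

  // Stage 1: pair up low halves and high halves of loads 0/2 and 1/3.
  Vec4 IntrVec1 = shuffle(Loads[0], Loads[2], {0, 1, 4, 5});
  Vec4 IntrVec2 = shuffle(Loads[1], Loads[3], {0, 1, 4, 5});
  Vec4 IntrVec3 = shuffle(Loads[0], Loads[2], {2, 3, 6, 7});
  Vec4 IntrVec4 = shuffle(Loads[1], Loads[3], {2, 3, 6, 7});

  // Stage 2: pick even lanes and odd lanes to finish the transpose.
  std::array<Vec4, 4> Strided;
  Strided[0] = shuffle(IntrVec1, IntrVec2, {0, 4, 2, 6});
  Strided[2] = shuffle(IntrVec3, IntrVec4, {0, 4, 2, 6});
  Strided[1] = shuffle(IntrVec1, IntrVec2, {1, 5, 3, 7});
  Strided[3] = shuffle(IntrVec3, IntrVec4, {1, 5, 3, 7});

  // Strided[k] must now hold wide-vector lanes {k, k+4, k+8, k+12}, the same
  // elements the strided shufflevectors in the tests above extract.
  for (unsigned k = 0; k < 4; ++k)
    for (unsigned j = 0; j < 4; ++j)
      assert(Strided[k][j] == k + 4 * j);
  return 0;
}

Seen this way, the first stage corresponds to the vinsertf128/vperm2f128 pairs in the generated code (combining 128-bit halves across loads) and the second stage to the vunpcklpd/vunpckhpd pairs, which is the shape the llc checks above verify.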