From bc8a1ab26fba5d5635467b9d0fd7ad9a0fd5bc6e Mon Sep 17 00:00:00 2001
From: Guillaume Chatelet <gchatelet@google.com>
Date: Tue, 21 Jan 2020 11:21:31 +0100
Subject: [PATCH] [Alignment][NFC] Use Align with CreateMaskedLoad

Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context:
http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type:
https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D73087
---
 clang/lib/CodeGen/CGBuiltin.cpp                      | 19 +++++++++----------
 llvm/include/llvm/Analysis/VectorUtils.h             |  1 +
 llvm/include/llvm/IR/IRBuilder.h                     |  9 ++++++++-
 llvm/lib/IR/AutoUpgrade.cpp                          |  9 +++++----
 llvm/lib/IR/IRBuilder.cpp                            | 18 +++++++++---------
 llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp |  2 +-
 .../Transforms/Instrumentation/MemorySanitizer.cpp   |  5 ++---
 llvm/lib/Transforms/Vectorize/LoopVectorize.cpp      |  6 +++---
 8 files changed, 38 insertions(+), 31 deletions(-)

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 9986ea4..8d00d3d 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -9727,8 +9727,8 @@ static Value *EmitX86MaskedStore(CodeGenFunction &CGF,
   return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Align, MaskVec);
 }
 
-static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
-                                ArrayRef<Value *> Ops, unsigned Align) {
+static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+                                Align Alignment) {
   // Cast the pointer to the right type.
   Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                              llvm::PointerType::getUnqual(Ops[1]->getType()));
@@ -9736,7 +9736,7 @@ static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
   Value *MaskVec = getMaskVecValue(CGF, Ops[2],
                                    Ops[1]->getType()->getVectorNumElements());
 
-  return CGF.Builder.CreateMaskedLoad(Ptr, Align, MaskVec, Ops[1]);
+  return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
 }
 
 static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
@@ -10731,11 +10731,11 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   case X86::BI__builtin_ia32_loaddqudi128_mask:
   case X86::BI__builtin_ia32_loaddqudi256_mask:
   case X86::BI__builtin_ia32_loaddqudi512_mask:
-    return EmitX86MaskedLoad(*this, Ops, 1);
+    return EmitX86MaskedLoad(*this, Ops, Align::None());
 
   case X86::BI__builtin_ia32_loadss128_mask:
   case X86::BI__builtin_ia32_loadsd128_mask:
-    return EmitX86MaskedLoad(*this, Ops, 1);
+    return EmitX86MaskedLoad(*this, Ops, Align::None());
 
   case X86::BI__builtin_ia32_loadaps128_mask:
   case X86::BI__builtin_ia32_loadaps256_mask:
@@ -10748,11 +10748,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   case X86::BI__builtin_ia32_movdqa32load512_mask:
   case X86::BI__builtin_ia32_movdqa64load128_mask:
   case X86::BI__builtin_ia32_movdqa64load256_mask:
-  case X86::BI__builtin_ia32_movdqa64load512_mask: {
-    unsigned Align =
-        getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
-    return EmitX86MaskedLoad(*this, Ops, Align);
-  }
+  case X86::BI__builtin_ia32_movdqa64load512_mask:
+    return EmitX86MaskedLoad(
+        *this, Ops,
+        getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
 
   case X86::BI__builtin_ia32_expandloaddf128_mask:
   case X86::BI__builtin_ia32_expandloaddf256_mask:
diff --git a/llvm/include/llvm/Analysis/VectorUtils.h b/llvm/include/llvm/Analysis/VectorUtils.h
index dd42b4f..8b465ca 100644
--- a/llvm/include/llvm/Analysis/VectorUtils.h
+++ b/llvm/include/llvm/Analysis/VectorUtils.h
@@ -509,6 +509,7 @@ public:
   bool isReverse() const { return Reverse; }
   uint32_t getFactor() const { return Factor; }
   uint32_t getAlignment() const { return Alignment.value(); }
+  Align getAlign() const { return Alignment; }
   uint32_t getNumMembers() const { return Members.size(); }
 
   /// Try to insert a new member \p Instr with index \p Index and
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 6f6d6db..9341810 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -727,7 +727,14 @@ public:
   CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
 
   /// Create a call to Masked Load intrinsic
-  CallInst *CreateMaskedLoad(Value *Ptr, unsigned Align, Value *Mask,
+  LLVM_ATTRIBUTE_DEPRECATED(
+      CallInst *CreateMaskedLoad(Value *Ptr, unsigned Alignment, Value *Mask,
+                                 Value *PassThru = nullptr,
+                                 const Twine &Name = ""),
+      "Use the version that takes Align instead") {
+    return CreateMaskedLoad(Ptr, Align(Alignment), Mask, PassThru, Name);
+  }
+  CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
                              Value *PassThru = nullptr,
                              const Twine &Name = "");
 
   /// Create a call to Masked Store intrinsic
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 6e2beeb..f587414 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1257,18 +1257,19 @@ static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
   Type *ValTy = Passthru->getType();
   // Cast the pointer to the right type.
   Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
-  unsigned Align =
-    Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;
+  const Align Alignment =
+      Aligned ? Align(cast<VectorType>(Passthru->getType())->getBitWidth() / 8)
+              : Align::None();
 
   // If the mask is all ones just emit a regular load.
   if (const auto *C = dyn_cast<Constant>(Mask))
     if (C->isAllOnesValue())
-      return Builder.CreateAlignedLoad(ValTy, Ptr, Align);
+      return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);
 
   // Convert the mask from an integer type to a vector of i1.
   unsigned NumElts = Passthru->getType()->getVectorNumElements();
   Mask = getX86MaskVec(Builder, Mask, NumElts);
 
-  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
+  return Builder.CreateMaskedLoad(Ptr, Alignment, Mask, Passthru);
 }
 
 static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 32daf40..2a6b251 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -466,14 +466,14 @@ CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
 }
 
 /// Create a call to a Masked Load intrinsic.
-/// \p Ptr      - base pointer for the load
-/// \p Align    - alignment of the source location
-/// \p Mask     - vector of booleans which indicates what vector lanes should
-///               be accessed in memory
-/// \p PassThru - pass-through value that is used to fill the masked-off lanes
-///               of the result
-/// \p Name     - name of the result variable
-CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
+/// \p Ptr       - base pointer for the load
+/// \p Alignment - alignment of the source location
+/// \p Mask      - vector of booleans which indicates what vector lanes should
+///                be accessed in memory
+/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
+///                of the result
+/// \p Name      - name of the result variable
+CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
                                           Value *Mask, Value *PassThru,
                                           const Twine &Name) {
   auto *PtrTy = cast<PointerType>(Ptr->getType());
@@ -483,7 +483,7 @@ CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
   if (!PassThru)
     PassThru = UndefValue::get(DataTy);
   Type *OverloadedTypes[] = { DataTy, PtrTy };
-  Value *Ops[] = { Ptr, getInt32(Align), Mask, PassThru};
+  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
   return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops, OverloadedTypes,
                                Name);
 }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 74ee88f4..a7aac58 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1331,7 +1331,7 @@ static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
 
   // The pass-through vector for an x86 masked load is a zero vector.
   CallInst *NewMaskedLoad =
-      IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
+      IC.Builder.CreateMaskedLoad(PtrCast, Align::None(), BoolMask, ZeroVec);
   return IC.replaceInstUsesWith(II, NewMaskedLoad);
 }
 
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 80acab3..45b8f83 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -2945,9 +2945,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     if (PropagateShadow) {
       std::tie(ShadowPtr, OriginPtr) =
           getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
-      setShadow(&I, IRB.CreateMaskedLoad(
-                        ShadowPtr, Alignment ? Alignment->value() : 0, Mask,
-                        getShadow(PassThru), "_msmaskedld"));
+      setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, *Alignment, Mask,
+                                         getShadow(PassThru), "_msmaskedld"));
     } else {
       setShadow(&I, getCleanShadow(&I));
     }
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index b165071..0f25ff8 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2263,7 +2263,7 @@ void InnerLoopVectorizer::vectorizeInterleaveGroup(Instruction *Instr,
                 : ShuffledMask;
       }
       NewLoad =
-          Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlignment(),
+          Builder.CreateMaskedLoad(AddrParts[Part], Group->getAlign(),
                                    GroupMask, UndefVec, "wide.masked.vec");
     }
     else
@@ -2475,8 +2475,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr,
       auto *VecPtr = CreateVecPtr(Part, State.get(Addr, {0, 0}));
       if (isMaskRequired)
         NewLI = Builder.CreateMaskedLoad(
-            VecPtr, Alignment.value(), BlockInMaskParts[Part],
-            UndefValue::get(DataTy), "wide.masked.load");
+            VecPtr, Alignment, BlockInMaskParts[Part], UndefValue::get(DataTy),
+            "wide.masked.load");
       else
         NewLI = Builder.CreateAlignedLoad(DataTy, VecPtr, Alignment.value(),
                                           "wide.load");
-- 
2.7.4
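
Editor's note, not part of the patch: the API migration above is easiest to see
from a call site. The sketch below is illustrative only. It assumes the Align
type and Align::None() introduced in D64790 (llvm/Support/Alignment.h) together
with the CreateMaskedLoad overloads shown in the IRBuilder.h hunk;
emitZeroMaskedLoad is a hypothetical helper written for this note, not a
function in the LLVM tree.

  // Hypothetical caller, mirroring the migration this patch performs.
  // Assumes the Align API from D64790; not code from the LLVM tree.
  #include "llvm/IR/IRBuilder.h"
  #include "llvm/Support/Alignment.h"

  using namespace llvm;

  static Value *emitZeroMaskedLoad(IRBuilder<> &Builder, Value *Ptr,
                                   Value *Mask, Value *ZeroVec) {
    // Before: a raw byte count, where 1 meant "no particular alignment".
    //   Builder.CreateMaskedLoad(Ptr, 1, Mask, ZeroVec);  // now deprecated
    // After: Align is non-zero and a power of two by construction, and
    // Align::None() spells out the one-byte case explicitly.
    return Builder.CreateMaskedLoad(Ptr, Align::None(), Mask, ZeroVec,
                                    "masked.load");
  }

The deprecated unsigned overload added in the IRBuilder.h hunk keeps such
callers compiling during the transition; all the wrapper does is forward the
raw value as Align(Alignment).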