From: Guillaume Chatelet
Date: Mon, 8 Jun 2020 11:15:28 +0000 (+0000)
Subject: [Alignment][NFC] TargetLowering::allowsMisalignedMemoryAccesses
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=3b6196c9b3283b3a9e57c899616ac2089ea4c8b7;p=platform%2Fupstream%2Fllvm.git

[Alignment][NFC] TargetLowering::allowsMisalignedMemoryAccesses

Summary:
Note to downstream target maintainers: this might silently change the
semantics of your code if you override
`TargetLowering::allowsMisalignedMemoryAccesses` without marking it
override. (A minimal stand-alone illustration of this pitfall is
sketched after the diff below.)

This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D81374
---

diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index d75f113..934cdd5 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1578,7 +1578,7 @@ public:
 
   /// LLT handling variant.
   virtual bool allowsMisalignedMemoryAccesses(
-      LLT, unsigned AddrSpace = 0, unsigned Align = 1,
+      LLT, unsigned AddrSpace = 0, Align Alignment = Align(1),
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool * /*Fast*/ = nullptr) const {
     return false;
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 35196b5..c4ed638 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -867,8 +867,7 @@ static bool findGISelOptimalMemOpLowering(std::vector<LLT> &MemOps,
     Ty = LLT::scalar(64);
   if (Op.isFixedDstAlign())
     while (Op.getDstAlign() < Ty.getSizeInBytes() &&
-           !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS,
-                                               Op.getDstAlign().value()))
+           !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, Op.getDstAlign()))
       Ty = LLT::scalar(Ty.getSizeInBytes());
   assert(Ty.getSizeInBits() > 0 && "Could not find valid type");
   // FIXME: check for the largest legal type we can load/store to.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 854c946..a3fec1d 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1256,7 +1256,7 @@ bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
 
 // Same as above but handling LLTs instead.
 bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
-    LLT Ty, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+    LLT Ty, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
     bool *Fast) const {
   if (Subtarget->requiresStrictAlign())
     return false;
@@ -1271,7 +1271,7 @@ bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
         // Code that uses clang vector extensions can mark that it
         // wants unaligned accesses to be treated as fast by
         // underspecifying alignment to be 1 or 2.
-        Align <= 2 ||
+        Alignment <= 2 ||
         // Disregard v2i64. Memcpy lowering produces those and splitting
         // them regresses performance on micro-benchmarks and olden/bh.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 2a68220..6d47fc8 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -413,9 +413,10 @@ public:
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       bool *Fast = nullptr) const override;
   /// LLT variant.
-  bool allowsMisalignedMemoryAccesses(
-      LLT Ty, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
-      bool *Fast = nullptr) const override;
+  bool allowsMisalignedMemoryAccesses(LLT Ty, unsigned AddrSpace,
+                                      Align Alignment,
+                                      MachineMemOperand::Flags Flags,
+                                      bool *Fast = nullptr) const override;
 
   /// Provide custom lowering hooks for some operations.
   SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
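
The warning in the summary is easiest to see with a compiler in front of you. Below is a minimal, self-contained C++ sketch, not LLVM code: `Base`, `DownstreamTLI` and the tiny `Align` struct are hypothetical stand-ins for `TargetLowering`, an out-of-tree target's subclass and `llvm::Align`. It shows how keeping the old `unsigned` spelling of the third parameter without `override` still compiles after a change like this one, but silently stops overriding the virtual hook.

// Hypothetical stand-ins only; assumes a pre-patch subclass that spelled the
// third parameter as `unsigned` and never wrote `override`.
#include <cstdint>
#include <iostream>

struct Align {                     // minimal stand-in for llvm::Align
  uint64_t Value;
  explicit Align(uint64_t V = 1) : Value(V) {}
};

struct Base {                      // stand-in for TargetLowering after the patch
  virtual ~Base() = default;
  virtual bool allowsMisalignedMemoryAccesses(unsigned AddrSpace,
                                              Align Alignment) const {
    return false;
  }
};

struct DownstreamTLI : Base {      // stand-in for a downstream target
  // Old signature, no `override`: this still compiles, but it now only hides
  // the base hook inside DownstreamTLI instead of overriding it.
  virtual bool allowsMisalignedMemoryAccesses(unsigned AddrSpace,
                                              unsigned Alignment) const {
    return true;
  }
  // Writing `override` here instead would turn the silent mismatch into a
  // "does not override any member function" compile error.
};

int main() {
  DownstreamTLI TLI;
  const Base &B = TLI;
  // Dispatches to Base::allowsMisalignedMemoryAccesses and prints 0,
  // even though the downstream class intended to answer true.
  std::cout << B.allowsMisalignedMemoryAccesses(0, Align(4)) << '\n';
}

This is why the note recommends marking such functions with `override`: the compiler then rejects any signature drift (such as `unsigned Align` becoming `Align Alignment`) instead of quietly falling back to the base implementation.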