/// Determine if the target supports unaligned memory accesses.
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
unsigned AddressSpace = 0,
- unsigned Alignment = 1,
+ Align Alignment = Align(1),
bool *Fast = nullptr) const;
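// --- Annotation, not part of the patch: a minimal usage sketch of the updated
// TTI hook. `TTI` (a TargetTransformInfo &) and `Ctx` (an LLVMContext &) are
// assumed names; Align comes from llvm/Support/Alignment.h.
bool Fast = false;
if (TTI.allowsMisalignedMemoryAccesses(Ctx, /*BitWidth=*/32, /*AddressSpace=*/0,
                                       Align(1), &Fast) &&
    Fast) {
  // The access is legal and fast even with no alignment guarantee; Align(1)
  // is the weakest value the type can represent.
}
// ---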
virtual bool allowsMisalignedMemoryAccesses(LLVMContext &Context,
unsigned BitWidth,
unsigned AddressSpace,
- unsigned Alignment,
+ Align Alignment,
bool *Fast) = 0;
/// Return hardware support for population count.
virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
virtual bool haveFastSqrt(Type *Ty) = 0;
return Impl.isFPVectorizationPotentiallyUnsafe();
}
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
- unsigned AddressSpace, unsigned Alignment,
+ unsigned AddressSpace, Align Alignment,
bool *Fast) override {
return Impl.allowsMisalignedMemoryAccesses(Context, BitWidth, AddressSpace,
Alignment, Fast);
bool isFPVectorizationPotentiallyUnsafe() const { return false; }
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
- unsigned AddressSpace, unsigned Alignment,
+ unsigned AddressSpace, Align Alignment,
bool *Fast) const {
return false;
}
/// \name Scalar TTI Implementations
/// @{
bool allowsMisalignedMemoryAccesses(LLVMContext &Context, unsigned BitWidth,
- unsigned AddressSpace, unsigned Alignment,
+ unsigned AddressSpace, Align Alignment,
bool *Fast) const {
EVT E = EVT::getIntegerVT(Context, BitWidth);
return getTLI()->allowsMisalignedMemoryAccesses(
/// helps to ensure that such replacements don't generate code that causes an
/// alignment error (trap) on the target machine.
virtual bool allowsMisalignedMemoryAccesses(
- EVT, unsigned AddrSpace = 0, unsigned Align = 1,
+ EVT, unsigned AddrSpace = 0, Align Alignment = Align(1),
MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
bool * /*Fast*/ = nullptr) const {
return false;
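// --- Annotation, not part of the patch: a hypothetical override showing the
// shape a backend implementation takes after this change. `MyTargetLowering`
// and its alignment policy are assumptions for illustration only.
bool MyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment,
    MachineMemOperand::Flags Flags, bool *Fast) const {
  if (Fast)
    *Fast = Alignment >= Align(4); // Below 4-byte alignment: legal but slow.
  return true;                     // This imaginary target never traps.
}
// ---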
bool TargetTransformInfo::allowsMisalignedMemoryAccesses(LLVMContext &Context,
unsigned BitWidth,
unsigned AddressSpace,
- unsigned Alignment,
+ Align Alignment,
bool *Fast) const {
return TTIImpl->allowsMisalignedMemoryAccesses(Context, BitWidth,
AddressSpace, Alignment, Fast);
StoreInst *ST = cast<StoreInst>(CombineInst);
unsigned AS = ST->getPointerAddressSpace();
- unsigned Align = ST->getAlignment();
+ Align Alignment = ST->getAlign();
// Check if this store is supported.
if (!TLI.allowsMisalignedMemoryAccesses(
TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
- Align)) {
+ Alignment)) {
// If this is not supported, there is no way we can combine
// the extract with the store.
return false;
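// --- Annotation, not part of the patch: StoreInst::getAlign() returns an
// llvm::Align, a guaranteed non-zero power of two, whereas the legacy
// getAlignment() returned a raw unsigned. The typed accessor is what lets the
// call above pass Alignment through without any conversion. ---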
MVT VT = getMVTForLLT(Ty);
if (NumMemOps && Op.allowOverlap() && NewTySize < Size &&
TLI.allowsMisalignedMemoryAccesses(
- VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0,
+ VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
MachineMemOperand::MONone, &Fast) &&
Fast)
TySize = Size;
// equal to DstAlign (or zero).
VT = MVT::i64;
if (Op.isFixedDstAlign())
- while (
- Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
- !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign().value()))
+ while (Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
+ !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign()))
VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
assert(VT.isInteger());
bool Fast;
if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
allowsMisalignedMemoryAccesses(
- VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 1,
+ VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign() : Align(1),
MachineMemOperand::MONone, &Fast) &&
Fast)
VTSize = Size;
}
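// --- Annotation, not part of the patch: comparisons like
// Op.getDstAlign() < (VT.getSizeInBits() / 8) use the mixed Align/integer
// operators from llvm/Support/Alignment.h, so the Align side needs no
// explicit .value() unwrap. ---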
// This is a misaligned access.
- return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment.value(), Flags,
- Fast);
+ return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
}
bool TargetLoweringBase::allowsMemoryAccessForAlignment(
}
bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(
- EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+ EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
bool *Fast) const {
if (Subtarget->requiresStrictAlign())
return false;
// Code that uses clang vector extensions can mark that it
// wants unaligned accesses to be treated as fast by
// underspecifying alignment to be 1 or 2.
- Align <= 2 ||
+ Alignment <= 2 ||
// Disregard v2i64. Memcpy lowering produces those and splitting
// them regresses performance on micro-benchmarks and olden/bh.
unsigned AS = StoreNode->getAddressSpace();
Align Alignment = StoreNode->getAlign();
if (Alignment < MemVT.getStoreSize() &&
- !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment.value(),
+ !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
StoreNode->getMemOperand()->getFlags(),
nullptr)) {
return scalarizeVectorStore(StoreNode, DAG);
if (Op.isAligned(AlignCheck))
return true;
bool Fast;
- return allowsMisalignedMemoryAccesses(VT, 0, 1, MachineMemOperand::MONone,
- &Fast) &&
+ return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
+ MachineMemOperand::MONone, &Fast) &&
Fast;
};
if (Op.isAligned(AlignCheck))
return true;
bool Fast;
- return allowsMisalignedMemoryAccesses(VT, 0, 1, MachineMemOperand::MONone,
- &Fast) &&
+ return allowsMisalignedMemoryAccesses(VT, 0, Align(1),
+ MachineMemOperand::MONone, &Fast) &&
Fast;
};
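// --- Annotation, not part of the patch: in these probes Align(1) stands in
// for the old raw `1`, i.e. "assume nothing about the pointer's alignment"
// when asking whether a wide unaligned vector access would still be fast. ---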
/// Returns true if the target allows unaligned memory accesses of the
/// specified type.
bool allowsMisalignedMemoryAccesses(
- EVT VT, unsigned AddrSpace = 0, unsigned Align = 1,
+ EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
bool *Fast = nullptr) const override;
/// LLT variant.
// Expand unaligned loads earlier than legalization. Due to visitation order
// problems during legalization, the emitted instructions to pack and unpack
// the bytes again are not eliminated in the case of an unaligned copy.
- if (!allowsMisalignedMemoryAccesses(VT, AS, Alignment.value(),
- LN->getMemOperand()->getFlags(),
- &IsFast)) {
+ if (!allowsMisalignedMemoryAccesses(
+ VT, AS, Alignment, LN->getMemOperand()->getFlags(), &IsFast)) {
SDValue Ops[2];
if (VT.isVector())
// order problems during legalization, the emitted instructions to pack and
// unpack the bytes again are not eliminated in the case of an unaligned
// copy.
- if (!allowsMisalignedMemoryAccesses(VT, AS, Alignment.value(),
- SN->getMemOperand()->getFlags(),
- &IsFast)) {
+ if (!allowsMisalignedMemoryAccesses(
+ VT, AS, Alignment, SN->getMemOperand()->getFlags(), &IsFast)) {
if (VT.isVector())
return scalarizeVectorStore(SN, DAG);
Align Alignment = StoreNode->getAlign();
if (Alignment < MemVT.getStoreSize() &&
- !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment.value(),
+ !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
StoreNode->getMemOperand()->getFlags(),
nullptr)) {
return expandUnalignedStore(StoreNode, DAG);
}
bool R600TargetLowering::allowsMisalignedMemoryAccesses(
- EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags,
+ EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
bool *IsFast) const {
if (IsFast)
*IsFast = false;
if (IsFast)
*IsFast = true;
- return VT.bitsGT(MVT::i32) && Align % 4 == 0;
+ return VT.bitsGT(MVT::i32) && Alignment.value() % 4 == 0;
}
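// --- Annotation, not part of the patch: since llvm::Align is always a power
// of two, Alignment.value() % 4 == 0 here is equivalent to
// Alignment >= Align(4). ---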
static SDValue CompactSwizzlableVector(
const SelectionDAG &DAG) const override;
bool allowsMisalignedMemoryAccesses(
- EVT VT, unsigned AS, unsigned Align,
+ EVT VT, unsigned AS, Align Alignment,
MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
bool *IsFast = nullptr) const override;
}
bool SITargetLowering::allowsMisalignedMemoryAccesses(
- EVT VT, unsigned AddrSpace, unsigned Alignment,
- MachineMemOperand::Flags Flags, bool *IsFast) const {
+ EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
+ bool *IsFast) const {
if (IsFast)
*IsFast = false;
}
return allowsMisalignedMemoryAccessesImpl(VT.getSizeInBits(), AddrSpace,
- Align(Alignment), Flags, IsFast);
+ Alignment, Flags, IsFast);
}
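// --- Annotation, not part of the patch: with the parameter already typed as
// Align, the previous Align(Alignment) re-wrap is redundant, so the value is
// now forwarded as-is. ---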
EVT SITargetLowering::getOptimalMemOpType(
}
bool allowsMisalignedMemoryAccesses(
- EVT VT, unsigned AS, unsigned Alignment,
+ EVT VT, unsigned AS, Align Alignment,
MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
bool *IsFast = nullptr) const override;
}
bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
- unsigned Alignment,
+ Align Alignment,
MachineMemOperand::Flags,
bool *Fast) const {
// Depends what it gets converted into if the type is weird.
bool Fast;
if (Op.size() >= 16 &&
(Op.isAligned(Align(16)) ||
- (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1,
+ (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, Align(1),
MachineMemOperand::MONone, &Fast) &&
Fast))) {
return MVT::v2f64;
} else if (Op.size() >= 8 &&
(Op.isAligned(Align(8)) ||
(allowsMisalignedMemoryAccesses(
- MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) &&
+ MVT::f64, 0, Align(1), MachineMemOperand::MONone, &Fast) &&
Fast))) {
return MVT::f64;
}
/// unaligned memory accesses of the specified type. Returns whether it
/// is "fast" by reference in the last argument.
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
- unsigned Align,
+ Align Alignment,
MachineMemOperand::Flags Flags,
bool *Fast) const override;
}
bool HexagonTargetLowering::allowsMisalignedMemoryAccesses(
- EVT VT, unsigned AddrSpace, unsigned Alignment,
- MachineMemOperand::Flags Flags, bool *Fast) const {
+ EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
+ bool *Fast) const {
MVT SVT = VT.getSimpleVT();
if (Subtarget.isHVXVectorType(SVT, true))
return allowsHvxMisalignedMemoryAccesses(SVT, Flags, Fast);
bool *Fast) const override;
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
- unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast)
- const override;
+ Align Alignment,
+ MachineMemOperand::Flags Flags,
+ bool *Fast) const override;
/// Returns relocation base for the given PIC jumptable.
SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
}
bool Mips16TargetLowering::allowsMisalignedMemoryAccesses(
- EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
+ EVT VT, unsigned, Align, MachineMemOperand::Flags, bool *Fast) const {
return false;
}
const MipsSubtarget &STI);
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
- unsigned Align,
+ Align Alignment,
MachineMemOperand::Flags Flags,
bool *Fast) const override;
}
bool MipsSETargetLowering::allowsMisalignedMemoryAccesses(
- EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
+ EVT VT, unsigned, Align, MachineMemOperand::Flags, bool *Fast) const {
MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
if (Subtarget.systemSupportsUnalignedAccess()) {
const TargetRegisterClass *RC);
bool allowsMisalignedMemoryAccesses(
- EVT VT, unsigned AS = 0, unsigned Align = 1,
+ EVT VT, unsigned AS = 0, Align Alignment = Align(1),
MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
bool *Fast = nullptr) const override;
return isInt<16>(Imm) || isUInt<16>(Imm);
}
-bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
- unsigned,
- unsigned,
+bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
MachineMemOperand::Flags,
bool *Fast) const {
if (DisablePPCUnaligned)
/// Is unaligned memory access allowed for the given type, and is it fast
/// relative to software emulation.
bool allowsMisalignedMemoryAccesses(
- EVT VT, unsigned AddrSpace, unsigned Align = 1,
+ EVT VT, unsigned AddrSpace, Align Alignment = Align(1),
MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
bool *Fast = nullptr) const override;
}
bool SystemZTargetLowering::allowsMisalignedMemoryAccesses(
- EVT VT, unsigned, unsigned, MachineMemOperand::Flags, bool *Fast) const {
+ EVT VT, unsigned, Align, MachineMemOperand::Flags, bool *Fast) const {
// Unaligned accesses should never be slower than the expanded version.
// We check specifically for aligned accesses in the few cases where
// they are required.
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
unsigned AS,
Instruction *I = nullptr) const override;
- bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
- unsigned Align,
+ bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
MachineMemOperand::Flags Flags,
bool *Fast) const override;
bool isTruncateFree(Type *, Type *) const override;
}
bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
- EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
+ EVT /*VT*/, unsigned /*AddrSpace*/, Align /*Alignment*/,
MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
// WebAssembly supports unaligned accesses, though it should be declared
// with the p2align attribute on loads and stores which do so, and there
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
unsigned AS,
Instruction *I = nullptr) const override;
- bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, unsigned Align,
+ bool allowsMisalignedMemoryAccesses(EVT, unsigned AddrSpace, Align Alignment,
MachineMemOperand::Flags Flags,
bool *Fast) const override;
bool isIntDivCheap(EVT VT, AttributeList Attr) const override;
}
bool X86TargetLowering::allowsMisalignedMemoryAccesses(
- EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
+ EVT VT, unsigned, Align Alignment, MachineMemOperand::Flags Flags,
bool *Fast) const {
if (Fast) {
switch (VT.getSizeInBits()) {
// well use a regular unaligned vector load.
// We don't have any NT loads pre-SSE41.
if (!!(Flags & MachineMemOperand::MOLoad))
- return (Align < 16 || !Subtarget.hasSSE41());
+ return (Alignment < 16 || !Subtarget.hasSSE41());
return false;
}
// Misaligned accesses of any size are always allowed.
/// Returns true if the target allows unaligned memory accesses of the
/// specified type. Returns whether it is "fast" in the last argument.
- bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
+ bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, Align Alignment,
MachineMemOperand::Flags Flags,
bool *Fast) const override;
/// Check if this load/store access is misaligned.
bool accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
- unsigned Alignment);
+ Align Alignment);
};
class LoadStoreVectorizerLegacyPass : public FunctionPass {
InstructionsProcessed->insert(Chain.begin(), Chain.end());
// If the store is going to be misaligned, don't vectorize it.
- if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) {
+ if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
if (S0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
auto Chains = splitOddVectorElts(Chain, Sz);
return vectorizeStoreChain(Chains.first, InstructionsProcessed) |
InstructionsProcessed->insert(Chain.begin(), Chain.end());
// If the load is going to be misaligned, don't vectorize it.
- if (accessIsMisaligned(SzInBytes, AS, Alignment.value())) {
+ if (accessIsMisaligned(SzInBytes, AS, Alignment)) {
if (L0->getPointerAddressSpace() != DL.getAllocaAddrSpace()) {
auto Chains = splitOddVectorElts(Chain, Sz);
return vectorizeLoadChain(Chains.first, InstructionsProcessed) |
}
bool Vectorizer::accessIsMisaligned(unsigned SzInBytes, unsigned AddressSpace,
- unsigned Alignment) {
- if (Alignment % SzInBytes == 0)
+ Align Alignment) {
+ if (Alignment.value() % SzInBytes == 0)
return false;
bool Fast = false;
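// --- Annotation, not part of the patch, worked example: for a 16-byte chain
// (SzInBytes == 16), Align(32) yields 32 % 16 == 0, so the access is treated
// as sufficiently aligned and the target is never consulted; Align(8) yields
// 8 % 16 != 0, so control falls through to ask the target about it. ---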