From 88218d5c5282c7b13bee667a0ce975a540aed74c Mon Sep 17 00:00:00 2001
From: Alex Richardson
Date: Mon, 21 Nov 2022 12:46:00 +0000
Subject: [PATCH] [SelectionDAG] Remove deprecated MemSDNode->getAlignment()

I noticed an assertion error when building MIPS code that loaded from
NULL. Loading from NULL ends up being a load with maximum alignment, and
due to integer truncation the maximum value was interpreted as 0 and the
assertion in MipsDAGToDAGISel::Select() failed. This previously happened
to work, but the maximum alignment was increased in
df84c1fe78130a86445d57563dea742e1b85156a, so it no longer fits into a
32-bit integer.

Instead of just fixing the one MIPS case, this patch removes all uses of
the deprecated getAlignment() call and replaces them with getAlign().

Differential Revision: https://reviews.llvm.org/D138420
---
 llvm/include/llvm/CodeGen/SelectionDAGNodes.h  |  2 --
 llvm/include/llvm/Target/TargetSelectionDAG.td |  6 ++---
 llvm/lib/Target/AMDGPU/R600ISelLowering.cpp    |  8 +++----
 llvm/lib/Target/AMDGPU/SIInstrInfo.td          |  2 +-
 llvm/lib/Target/ARM/ARMInstrMVE.td             | 32 +++++++++++++-------------
 llvm/lib/Target/ARM/ARMInstrNEON.td            | 20 ++++++++--------
 llvm/lib/Target/ARM/ARMInstrVFP.td             |  8 +++----
 llvm/lib/Target/M68k/M68kInstrInfo.td          |  4 ++--
 llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp      |  4 ++--
 llvm/lib/Target/PowerPC/PPCInstrInfo.td        | 14 +++++------
 llvm/lib/Target/X86/X86InstrFragmentsSIMD.td   | 10 ++++----
 llvm/lib/Target/X86/X86InstrInfo.td            | 10 ++++----
 llvm/test/CodeGen/Mips/load-max-alignment.ll   | 16 +++++++++++++
 13 files changed, 75 insertions(+), 61 deletions(-)
 create mode 100644 llvm/test/CodeGen/Mips/load-max-alignment.ll

diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index 7eb6d06..6136b32 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1290,8 +1290,6 @@ public:
   /// Returns alignment and volatility of the memory access
   Align getOriginalAlign() const { return MMO->getBaseAlign(); }
   Align getAlign() const { return MMO->getAlign(); }
-  // FIXME: Remove once transition to getAlign is over.
-  unsigned getAlignment() const { return MMO->getAlign().value(); }

   /// Return the SubclassData value, without HasDebugValue. This contains an
   /// encoding of the volatile flag, as well as bits used by subclasses. This
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 378cb9c..3ff6b62 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -851,7 +851,7 @@ class PatFrags<dag ops, list<dag> frags, code pred = [{}],
   // If this is empty, accept any address space.
   list<int> AddressSpaces = ?;

-  // cast<MemSDNode>(N)->getAlignment() >=
+  // cast<MemSDNode>(N)->getAlign() >=
   // If this is empty, accept any alignment.
   int MinAlignment = ?;
@@ -1331,7 +1331,7 @@ def nontemporalstore : PatFrag<(ops node:$val, node:$ptr),
 def alignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
                                       (nontemporalstore node:$val, node:$ptr), [{
   StoreSDNode *St = cast<StoreSDNode>(N);
-  return St->getAlignment() >= St->getMemoryVT().getStoreSize();
+  return St->getAlign() >= St->getMemoryVT().getStoreSize();
 }]>;

 def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr),
@@ -1349,7 +1349,7 @@ def nontemporalload : PatFrag<(ops node:$ptr),
 def alignednontemporalload : PatFrag<(ops node:$ptr),
                                      (nontemporalload node:$ptr), [{
   LoadSDNode *Ld = cast<LoadSDNode>(N);
-  return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
+  return Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
 }]>;

 // setcc convenience fragments.
diff --git a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
index cec7923..2b2d27c 100644
--- a/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -1003,10 +1003,10 @@ SDValue R600TargetLowering::lowerPrivateTruncStore(StoreSDNode *Store,
   SDValue Mask;
   if (Store->getMemoryVT() == MVT::i8) {
-    assert(Store->getAlignment() >= 1);
+    assert(Store->getAlign() >= 1);
     Mask = DAG.getConstant(0xff, DL, MVT::i32);
   } else if (Store->getMemoryVT() == MVT::i16) {
-    assert(Store->getAlignment() >= 2);
+    assert(Store->getAlign() >= 2);
     Mask = DAG.getConstant(0xffff, DL, MVT::i32);
   } else {
     llvm_unreachable("Unsupported private trunc store");
@@ -1138,7 +1138,7 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
       MaskConstant = DAG.getConstant(0xFF, DL, MVT::i32);
     } else {
       assert(MemVT == MVT::i16);
-      assert(StoreNode->getAlignment() >= 2);
+      assert(StoreNode->getAlign() >= 2);
       MaskConstant = DAG.getConstant(0xFFFF, DL, MVT::i32);
     }

@@ -1245,7 +1245,7 @@ SDValue R600TargetLowering::lowerPrivateExtLoad(SDValue Op,
   LoadSDNode *Load = cast<LoadSDNode>(Op);
   ISD::LoadExtType ExtType = Load->getExtensionType();
   EVT MemVT = Load->getMemoryVT();
-  assert(Load->getAlignment() >= MemVT.getStoreSize());
+  assert(Load->getAlign() >= MemVT.getStoreSize());

   SDValue BasePtr = Load->getBasePtr();
   SDValue Chain = Load->getChain();
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index a5bab27..dfbf820 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -555,7 +555,7 @@ def store_align16_local_m0 : PatFrag <(ops node:$value, node:$ptr),
   let IsStore = 1;
 }

-let PredicateCode = [{return cast<MemSDNode>(N)->getAlignment() < 4;}],
+let PredicateCode = [{return cast<MemSDNode>(N)->getAlign() < 4;}],
     GISelPredicateCode = [{return (*MI.memoperands_begin())->getAlign() < 4;}],
     AddressSpaces = [ AddrSpaces.Local ] in {
 def load_align_less_than_4_local : PatFrag<(ops node:$ptr),
diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td
index 9c03f72..7896d9a 100644
--- a/llvm/lib/Target/ARM/ARMInstrMVE.td
+++ b/llvm/lib/Target/ARM/ARMInstrMVE.td
@@ -4481,7 +4481,7 @@ let Predicates = [HasMVEInt] in {
 def predicate_cast : SDNode<"ARMISD::PREDICATE_CAST", SDTUnaryOp>;

 def load_align4 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() >= 4;
+  return cast<LoadSDNode>(N)->getAlign() >= 4;
 }]>;

 let Predicates = [HasMVEInt] in {
@@ -7036,19 +7036,19 @@ let Predicates = [HasMVEInt], hasSideEffects = 0, isMoveReg = 1,

 def aligned32_pre_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                                   (pre_store node:$val, node:$ptr, node:$offset), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 4;
+  return cast<StoreSDNode>(N)->getAlign() >= 4;
 }]>;
 def aligned32_post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                                    (post_store node:$val, node:$ptr, node:$offset), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 4;
+  return cast<StoreSDNode>(N)->getAlign() >= 4;
 }]>;
 def aligned16_pre_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                                   (pre_store node:$val, node:$ptr, node:$offset), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 2;
+  return cast<StoreSDNode>(N)->getAlign() >= 2;
 }]>;
 def aligned16_post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
                                    (post_store node:$val, node:$ptr, node:$offset), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 2;
+  return cast<StoreSDNode>(N)->getAlign() >= 2;
 }]>;
@@ -7075,7 +7075,7 @@ def aligned_maskedloadvi16: PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                     (masked_ld node:$ptr, undef, node:$pred, node:$passthru), [{
   auto *Ld = cast<MaskedLoadSDNode>(N);
   EVT ScalarVT = Ld->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && Ld->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && Ld->getAlign() >= 2;
 }]>;
 def aligned_sextmaskedloadvi16 : PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                          (aligned_maskedloadvi16 node:$ptr, node:$pred, node:$passthru), [{
@@ -7095,7 +7095,7 @@ def aligned_maskedloadvi32: PatFrag<(ops node:$ptr, node:$pred, node:$passthru),
                                     (masked_ld node:$ptr, undef, node:$pred, node:$passthru), [{
   auto *Ld = cast<MaskedLoadSDNode>(N);
   EVT ScalarVT = Ld->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && Ld->getAlignment() >= 4;
+  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && Ld->getAlign() >= 4;
 }]>;

 def aligned_maskedstvi8 : PatFrag<(ops node:$val, node:$ptr, node:$pred),
@@ -7106,13 +7106,13 @@ def aligned_maskedstvi16 : PatFrag<(ops node:$val, node:$ptr, node:$pred),
                                    (masked_st node:$val, node:$ptr, undef, node:$pred), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlign() >= 2;
 }]>;
 def aligned_maskedstvi32 : PatFrag<(ops node:$val, node:$ptr, node:$pred),
                                    (masked_st node:$val, node:$ptr, undef, node:$pred), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlignment() >= 4;
+  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlign() >= 4;
 }]>;

 def pre_maskedstore : PatFrag<(ops node:$val, node:$base, node:$offset, node:$mask),
@@ -7137,25 +7137,25 @@ def aligned_pre_maskedstorevi16 : PatFrag<(ops node:$val, node:$ptr, node:$offse
                              (pre_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlign() >= 2;
 }]>;
 def aligned_post_maskedstorevi16 : PatFrag<(ops node:$val, node:$ptr, node:$offset, node:$mask),
                              (post_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlign() >= 2;
 }]>;
 def aligned_pre_maskedstorevi32 : PatFrag<(ops node:$val, node:$ptr, node:$offset, node:$mask),
                              (pre_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlignment() >= 4;
+  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlign() >= 4;
 }]>;
 def aligned_post_maskedstorevi32 : PatFrag<(ops node:$val, node:$ptr, node:$offset, node:$mask),
                              (post_maskedstore node:$val, node:$ptr, node:$offset, node:$mask), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlignment() >= 4;
+  return (ScalarVT == MVT::i32 || ScalarVT == MVT::f32) && St->getAlign() >= 4;
 }]>;
@@ -7197,7 +7197,7 @@ def aligned_truncmaskedstvi16 : PatFrag<(ops node:$val, node:$base, node:$pred),
                                         (truncmaskedst node:$val, node:$base, node:$pred), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlign() >= 2;
 }]>;
 def pre_truncmaskedst : PatFrag<(ops node:$val, node:$base, node:$offset, node:$pred),
                                 (masked_st node:$val, node:$base, node:$offset, node:$pred), [{
@@ -7212,7 +7212,7 @@ def aligned_pre_truncmaskedstvi16 : PatFrag<(ops node:$val, node:$base, node:$of
                              (pre_truncmaskedst node:$val, node:$base, node:$offset, node:$pred), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlign() >= 2;
 }]>;
 def post_truncmaskedst : PatFrag<(ops node:$val, node:$base, node:$offset, node:$postd),
                                  (masked_st node:$val, node:$base, node:$offset, node:$postd), [{
@@ -7227,7 +7227,7 @@ def aligned_post_truncmaskedstvi16 : PatFrag<(ops node:$val, node:$base, node:$o
                              (post_truncmaskedst node:$val, node:$base, node:$offset, node:$postd), [{
   auto *St = cast<MaskedStoreSDNode>(N);
   EVT ScalarVT = St->getMemoryVT().getScalarType();
-  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlignment() >= 2;
+  return (ScalarVT == MVT::i16 || ScalarVT == MVT::f16) && St->getAlign() >= 2;
 }]>;

 // Load/store patterns
diff --git a/llvm/lib/Target/ARM/ARMInstrNEON.td b/llvm/lib/Target/ARM/ARMInstrNEON.td
index 6b7a4a4..4c8fe44 100644
--- a/llvm/lib/Target/ARM/ARMInstrNEON.td
+++ b/llvm/lib/Target/ARM/ARMInstrNEON.td
@@ -439,39 +439,39 @@ def VecListFourQWordIndexed : Operand<i32> {
 }

 def dword_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() >= 8;
+  return cast<LoadSDNode>(N)->getAlign() >= 8;
 }]>;
 def dword_alignedstore : PatFrag<(ops node:$val, node:$ptr),
                                  (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 8;
+  return cast<StoreSDNode>(N)->getAlign() >= 8;
 }]>;
 def word_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() == 4;
+  return cast<LoadSDNode>(N)->getAlign() == 4;
 }]>;
 def word_alignedstore : PatFrag<(ops node:$val, node:$ptr),
                                 (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() == 4;
+  return cast<StoreSDNode>(N)->getAlign() == 4;
 }]>;
 def hword_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() == 2;
+  return cast<LoadSDNode>(N)->getAlign() == 2;
 }]>;
 def hword_alignedstore : PatFrag<(ops node:$val, node:$ptr),
                                  (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() == 2;
+  return cast<StoreSDNode>(N)->getAlign() == 2;
 }]>;
 def byte_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() == 1;
+  return cast<LoadSDNode>(N)->getAlign() == 1;
 }]>;
 def byte_alignedstore : PatFrag<(ops node:$val, node:$ptr),
                                 (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() == 1;
+  return cast<StoreSDNode>(N)->getAlign() == 1;
 }]>;
 def non_word_alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() < 4;
+  return cast<LoadSDNode>(N)->getAlign() < 4;
 }]>;
 def non_word_alignedstore : PatFrag<(ops node:$val, node:$ptr),
                                     (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() < 4;
+  return cast<StoreSDNode>(N)->getAlign() < 4;
 }]>;

 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/ARM/ARMInstrVFP.td b/llvm/lib/Target/ARM/ARMInstrVFP.td
index b233555..8be35c8 100644
--- a/llvm/lib/Target/ARM/ARMInstrVFP.td
+++ b/llvm/lib/Target/ARM/ARMInstrVFP.td
@@ -111,21 +111,21 @@ def vfp_f64imm : Operand<f64>,
 }

 def alignedload16 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() >= 2;
+  return cast<LoadSDNode>(N)->getAlign() >= 2;
 }]>;

 def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() >= 4;
+  return cast<LoadSDNode>(N)->getAlign() >= 4;
 }]>;

 def alignedstore16 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 2;
+  return cast<StoreSDNode>(N)->getAlign() >= 2;
 }]>;

 def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() >= 4;
+  return cast<StoreSDNode>(N)->getAlign() >= 4;
 }]>;

 // The VCVT to/from fixed-point instructions encode the 'fbits' operand
diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.td b/llvm/lib/Target/M68k/M68kInstrInfo.td
index 80c39e2..a959186 100644
--- a/llvm/lib/Target/M68k/M68kInstrInfo.td
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.td
@@ -522,7 +522,7 @@ def Mxloadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
   if (ExtType == ISD::EXTLOAD)
-    return LD->getAlignment() >= 2 && !LD->isSimple();
+    return LD->getAlign() >= 2 && !LD->isSimple();
   return false;
 }]>;

@@ -532,7 +532,7 @@ def Mxloadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
   if (ExtType == ISD::EXTLOAD)
-    return LD->getAlignment() >= 4 && !LD->isSimple();
+    return LD->getAlign() >= 4 && !LD->isSimple();
   return false;
 }]>;

diff --git a/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp b/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
index c4bb3d9..4fb11fa 100644
--- a/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
+++ b/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp
@@ -296,8 +296,8 @@ void MipsDAGToDAGISel::Select(SDNode *Node) {
   case ISD::LOAD:
   case ISD::STORE:
     assert((Subtarget->systemSupportsUnalignedAccess() ||
-            cast<MemSDNode>(Node)->getMemoryVT().getSizeInBits() / 8 <=
-            cast<MemSDNode>(Node)->getAlignment()) &&
+            cast<MemSDNode>(Node)->getAlign() >=
+            cast<MemSDNode>(Node)->getMemoryVT().getStoreSize()) &&
            "Unexpected unaligned loads/stores.");
     break;
 #endif
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index 45a69e2..faf7f05 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -569,30 +569,30 @@ def imm64ZExt32 : Operand<i64>, ImmLeaf<i64,
 def DSFormLoad : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return isOffsetMultipleOf(N, 4) || cast<LoadSDNode>(N)->getAlignment() >= 4;
+  return isOffsetMultipleOf(N, 4) || cast<LoadSDNode>(N)->getAlign() >= 4;
 }]>;
 def DSFormStore : PatFrag<(ops node:$val, node:$ptr),
                           (store node:$val, node:$ptr), [{
-  return isOffsetMultipleOf(N, 4) || cast<StoreSDNode>(N)->getAlignment() >= 4;
+  return isOffsetMultipleOf(N, 4) || cast<StoreSDNode>(N)->getAlign() >= 4;
 }]>;
 def DSFormSextLoadi32 : PatFrag<(ops node:$ptr), (sextloadi32 node:$ptr), [{
-  return isOffsetMultipleOf(N, 4) || cast<LoadSDNode>(N)->getAlignment() >= 4;
+  return isOffsetMultipleOf(N, 4) || cast<LoadSDNode>(N)->getAlign() >= 4;
 }]>;
 def DSFormPreStore : PatFrag<
                        (ops node:$val, node:$base, node:$offset),
                        (pre_store node:$val, node:$base, node:$offset), [{
-  return isOffsetMultipleOf(N, 4) || cast<StoreSDNode>(N)->getAlignment() >= 4;
+  return isOffsetMultipleOf(N, 4) || cast<StoreSDNode>(N)->getAlign() >= 4;
 }]>;

 def NonDSFormLoad : PatFrag<(ops node:$ptr), (load node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() < 4 && !isOffsetMultipleOf(N, 4);
+  return cast<LoadSDNode>(N)->getAlign() < 4 && !isOffsetMultipleOf(N, 4);
 }]>;
 def NonDSFormStore : PatFrag<(ops node:$val, node:$ptr),
                              (store node:$val, node:$ptr), [{
-  return cast<StoreSDNode>(N)->getAlignment() < 4 && !isOffsetMultipleOf(N, 4);
+  return cast<StoreSDNode>(N)->getAlign() < 4 && !isOffsetMultipleOf(N, 4);
 }]>;
 def NonDSFormSextLoadi32 : PatFrag<(ops node:$ptr), (sextloadi32 node:$ptr), [{
-  return cast<LoadSDNode>(N)->getAlignment() < 4 && !isOffsetMultipleOf(N, 4);
+  return cast<LoadSDNode>(N)->getAlign() < 4 && !isOffsetMultipleOf(N, 4);
 }]>;

 // This is a somewhat weaker condition than actually checking for 16-byte
diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 774f7e9..9c1f33e 100644
--- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -867,13 +867,13 @@ def extloadv16f16 : PatFrag<(ops node:$ptr), (extloadvf16 node:$ptr)>;
 def alignedstore : PatFrag<(ops node:$val, node:$ptr),
                            (store node:$val, node:$ptr), [{
   auto *St = cast<StoreSDNode>(N);
-  return St->getAlignment() >= St->getMemoryVT().getStoreSize();
+  return St->getAlign() >= St->getMemoryVT().getStoreSize();
 }]>;

 // Like 'load', but always requires vector size alignment.
 def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
   auto *Ld = cast<LoadSDNode>(N);
-  return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
+  return Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
 }]>;

 // 128-bit aligned load pattern fragments
@@ -941,7 +941,7 @@ def alignedloadv64i8 : PatFrag<(ops node:$ptr),
 def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
   auto *Ld = cast<LoadSDNode>(N);
   return Subtarget->hasSSEUnalignedMem() ||
-         Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
+         Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
 }]>;

 // 128-bit memop pattern fragments
@@ -1134,7 +1134,7 @@ def masked_load_aligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
   // We can't use memory VT because type widening changes the node VT, but
   // not the memory VT.
   auto *Ld = cast<MaskedLoadSDNode>(N);
-  return Ld->getAlignment() >= Ld->getValueType(0).getStoreSize();
+  return Ld->getAlign() >= Ld->getValueType(0).getStoreSize();
 }]>;

 def X86mExpandingLoad : PatFrag<(ops node:$src1, node:$src2, node:$src3),
@@ -1159,7 +1159,7 @@ def masked_store_aligned : PatFrag<(ops node:$src1, node:$src2, node:$src3),
   // We can't use memory VT because type widening changes the node VT, but
   // not the memory VT.
   auto *St = cast<MaskedStoreSDNode>(N);
-  return St->getAlignment() >= St->getOperand(1).getValueType().getStoreSize();
+  return St->getAlign() >= St->getOperand(1).getValueType().getStoreSize();
 }]>;

 def X86mCompressingStore : PatFrag<(ops node:$src1, node:$src2, node:$src3),
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 2ed7e45..4285df0 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -1191,7 +1191,7 @@ def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
   if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
-    return LD->getAlignment() >= 2 && LD->isSimple();
+    return LD->getAlign() >= 2 && LD->isSimple();
   return false;
 }]>;

@@ -1201,7 +1201,7 @@ def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
   if (ExtType == ISD::EXTLOAD && EnablePromoteAnyextLoad)
-    return LD->getAlignment() >= 4 && LD->isSimple();
+    return LD->getAlign() >= 4 && LD->isSimple();
   return false;
 }]>;

@@ -1213,12 +1213,12 @@ def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
 def loadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr))>;
 def alignedloadf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
   LoadSDNode *Ld = cast<LoadSDNode>(N);
-  return Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
+  return Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
 }]>;
 def memopf128 : PatFrag<(ops node:$ptr), (f128 (load node:$ptr)), [{
   LoadSDNode *Ld = cast<LoadSDNode>(N);
   return Subtarget->hasSSEUnalignedMem() ||
-         Ld->getAlignment() >= Ld->getMemoryVT().getStoreSize();
+         Ld->getAlign() >= Ld->getMemoryVT().getStoreSize();
 }]>;

 def sextloadi16i8 : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
@@ -1259,7 +1259,7 @@ def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (unindexedload node:$ptr)), [
   if (LD->getMemoryVT() == MVT::i32)
     return true;

-  return LD->getAlignment() >= 4 && LD->isSimple();
+  return LD->getAlign() >= 4 && LD->isSimple();
 }]>;

diff --git a/llvm/test/CodeGen/Mips/load-max-alignment.ll b/llvm/test/CodeGen/Mips/load-max-alignment.ll
new file mode 100644
index 0000000..3f7798a
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/load-max-alignment.ll
@@ -0,0 +1,16 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+;; Loading a value with max alignment triggered an assertion due to unsigned
+;; integer truncation causing 1<<32 to be interpreted as 0 after the max
+;; alignment was increased in df84c1fe78130a86445d57563dea742e1b85156a
+; RUN: llc -mtriple=mips64 -target-abi=n64 -relocation-model=pic < %s | FileCheck %s
+
+define i32 @load_max_align(ptr %arg) nounwind {
+; CHECK-LABEL: load_max_align:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    lw $2, 0($4)
+; CHECK-NEXT:    jr $ra
+; CHECK-NEXT:    nop
+entry:
+  %result = load i32, ptr %arg, align 4294967296
+  ret i32 %result
+}
-- 
2.7.4
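
Editor's note (not part of the patch): a minimal standalone C++ sketch of the truncation the commit message describes. It does not use LLVM's own Align type; the variable names and printed values are illustrative only, showing why narrowing a 2^32-byte alignment to a 32-bit unsigned integer makes a size-versus-alignment assertion fail.

```cpp
#include <cstdint>
#include <iostream>

int main() {
  // The maximum representable alignment is now 1 << 32 bytes, which is what
  // a load from NULL ends up with (see the new MIPS test case above).
  uint64_t MaxAlign = uint64_t(1) << 32;

  // The removed getAlignment() accessor returned 'unsigned', so the 64-bit
  // value was narrowed to 32 bits and wrapped around to 0.
  unsigned Truncated = static_cast<unsigned>(MaxAlign);

  uint64_t StoreSize = 4; // store size in bytes of an i32 load

  std::cout << "64-bit alignment: " << MaxAlign << "\n";  // 4294967296
  std::cout << "after narrowing:  " << Truncated << "\n"; // 0

  // Old-style check: "store size <= 32-bit alignment" -> 4 <= 0 -> false.
  std::cout << "old-style check:  " << (StoreSize <= Truncated) << "\n"; // 0
  // New-style check keeps the full 64-bit alignment value -> true.
  std::cout << "new-style check:  " << (MaxAlign >= StoreSize) << "\n";  // 1
  return 0;
}
```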