Revert "GlobalISel: Add G_ASSERT_ALIGN hint instruction"
author     James Y Knight <jyknight@google.com>
           Fri, 14 Jan 2022 04:49:17 +0000 (04:49 +0000)
committer  James Y Knight <jyknight@google.com>
           Fri, 14 Jan 2022 04:50:07 +0000 (04:50 +0000)
This commit sometimes causes a crash when compiling a vtable thunk. E.g.:

clang '--target=aarch64-grtev4-linux-gnu' -xc++ - -c -o /dev/null <<EOF
struct a {
  virtual int f();
};
struct c {
  virtual int &g() const;
};
struct d : a, c {
  int &g() const;
};
int &d::g() const {}
EOF

The following follow-up commits have also been reverted:
Revert "IR: Make getRetAlign check callee function attributes"
Revert "Fix MSVC "32-bit shift implicitly converted to 64 bits" warning. NFC."
Revert "Fix MSVC "32-bit shift implicitly converted to 64 bits" warning. NFC."

This reverts commit 4f414af6a77cdbd9b6303a7afa525cfb3f9d792a.
This reverts commit a5507d2e253a2c94c3ca7794edf7385af8082b97.
This reverts commit 3d2d208f6a0a421b23937c39b9d371183a5913a3.
This reverts commit 07ddfa95e3b5ea8464e90545f592624221b854ae.

12 files changed:
llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
llvm/include/llvm/IR/InstrTypes.h
llvm/include/llvm/Support/TargetOpcodes.def
llvm/include/llvm/Target/GenericOpcodes.td
llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-assert-align.ll [deleted file]
llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-assert-align.mir [deleted file]
llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp

diff --git a/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/llvm/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index c4c2fc0..fde0cb3 100644 (file)
@@ -836,38 +836,17 @@ public:
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op);
 
-
-  /// Build and insert G_ASSERT_SEXT, G_ASSERT_ZEXT, or G_ASSERT_ALIGN
-  ///
-  /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAssertOp(unsigned Opc, const DstOp &Res, const SrcOp &Op,
-                                   unsigned Val) {
-    return buildInstr(Opc, Res, Op).addImm(Val);
-  }
-
   /// Build and insert \p Res = G_ASSERT_ZEXT Op, Size
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildAssertZExt(const DstOp &Res, const SrcOp &Op,
-                                      unsigned Size) {
-    return buildAssertOp(TargetOpcode::G_ASSERT_ZEXT, Res, Op, Size);
-  }
+                                      unsigned Size);
 
   /// Build and insert \p Res = G_ASSERT_SEXT Op, Size
   ///
   /// \return a MachineInstrBuilder for the newly created instruction.
   MachineInstrBuilder buildAssertSExt(const DstOp &Res, const SrcOp &Op,
-                                      unsigned Size) {
-    return buildAssertOp(TargetOpcode::G_ASSERT_SEXT, Res, Op, Size);
-  }
-
-  /// Build and insert \p Res = G_ASSERT_ALIGN Op, AlignVal
-  ///
-  /// \return a MachineInstrBuilder for the newly created instruction.
-  MachineInstrBuilder buildAssertAlign(const DstOp &Res, const SrcOp &Op,
-                                      Align AlignVal) {
-    return buildAssertOp(TargetOpcode::G_ASSERT_ALIGN, Res, Op, AlignVal.value());
-  }
+                                      unsigned Size);
 
   /// Build and insert `Res = G_LOAD Addr, MMO`.
   ///
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index fbd7e1c..03839e0 100644 (file)
@@ -1723,13 +1723,7 @@ public:
   }
 
   /// Extract the alignment of the return value.
-  MaybeAlign getRetAlign() const {
-    if (auto Align = Attrs.getRetAlignment())
-      return Align;
-    if (const Function *F = getCalledFunction())
-      return F->getAttributes().getRetAlignment();
-    return None;
-  }
+  MaybeAlign getRetAlign() const { return Attrs.getRetAlignment(); }
 
   /// Extract the alignment for a call or parameter (0=unknown).
   MaybeAlign getParamAlign(unsigned ArgNo) const {
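For context, a minimal standalone sketch (hypothetical types, not part of this commit) of the behavioural difference: the reverted getRetAlign() fell back to the callee declaration's return-alignment attribute when the call site carried none, whereas the restored version consults only the call-site attribute list.

  #include <cstdio>
  #include <optional>

  // Stand-ins for the call-site and callee attribute lists.
  struct Attrs { std::optional<unsigned> RetAlign; };

  // Reverted behaviour: call-site attributes first, then the declaration.
  std::optional<unsigned> getRetAlignWithFallback(const Attrs &CallSite,
                                                  const Attrs *Callee) {
    if (CallSite.RetAlign)
      return CallSite.RetAlign;   // explicit 'align' on the call itself
    if (Callee)
      return Callee->RetAlign;    // fall back to e.g. 'declare align 8 ...'
    return std::nullopt;
  }

  int main() {
    Attrs CallSite{};   // no 'align' return attribute on the call site
    Attrs Callee{8};    // declaration promises an 8-byte-aligned result
    auto A = getRetAlignWithFallback(CallSite, &Callee);
    std::printf("align: %u\n", A ? *A : 0u); // 8 with the fallback, 0 without
  }

This fallback is what let CallLowering pick up an alignment hint for a call with no call-site attribute, such as the call to @returns_ptr_align8 in the deleted irtranslator test further down.
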
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index 428cbb4..b34b885 100644 (file)
@@ -228,11 +228,10 @@ HANDLE_TARGET_OPCODE(ICALL_BRANCH_FUNNEL)
 /// generate code. These instructions only act as optimization hints.
 HANDLE_TARGET_OPCODE(G_ASSERT_SEXT)
 HANDLE_TARGET_OPCODE(G_ASSERT_ZEXT)
-HANDLE_TARGET_OPCODE(G_ASSERT_ALIGN)
 HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPTIMIZATION_HINT_START,
                             G_ASSERT_SEXT)
 HANDLE_TARGET_OPCODE_MARKER(PRE_ISEL_GENERIC_OPTIMIZATION_HINT_END,
-                            G_ASSERT_ALIGN)
+                            G_ASSERT_ZEXT)
 
 /// Generic ADD instruction. This is an integer add.
 HANDLE_TARGET_OPCODE(G_ADD)
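The START/END markers above alias the first and last hint opcode, so dropping G_ASSERT_ALIGN also shrinks the optimization-hint range back to G_ASSERT_SEXT..G_ASSERT_ZEXT. A minimal sketch (hypothetical enum values, not LLVM's real numbering) of how such marker ranges are typically consumed, which is what the tightened assert in RegBankSelect::assignInstr below relies on:

  #include <cstdio>

  enum Opcode : unsigned {
    G_ASSERT_SEXT,
    G_ASSERT_ZEXT,
    // Marker values alias the first and last member of the range.
    PRE_ISEL_GENERIC_OPTIMIZATION_HINT_START = G_ASSERT_SEXT,
    PRE_ISEL_GENERIC_OPTIMIZATION_HINT_END = G_ASSERT_ZEXT,
    G_ADD,
  };

  // Range check in the style of llvm::isPreISelGenericOptimizationHint().
  bool isPreISelGenericOptimizationHint(unsigned Opc) {
    return Opc >= PRE_ISEL_GENERIC_OPTIMIZATION_HINT_START &&
           Opc <= PRE_ISEL_GENERIC_OPTIMIZATION_HINT_END;
  }

  int main() {
    std::printf("%d %d\n", isPreISelGenericOptimizationHint(G_ASSERT_ZEXT),
                isPreISelGenericOptimizationHint(G_ADD)); // prints: 1 0
  }
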
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 2af20ab..72c9748 100644 (file)
@@ -1434,10 +1434,3 @@ def G_ASSERT_SEXT : GenericInstruction {
   let InOperandList = (ins type0:$src, untyped_imm_0:$sz);
   let hasSideEffects = false;
 }
-
-// Asserts that a value has at least the given alignment.
-def G_ASSERT_ALIGN : GenericInstruction {
-  let OutOperandList = (outs type0:$dst);
-  let InOperandList = (ins type0:$src, untyped_imm_0:$align);
-  let hasSideEffects = false;
-}
diff --git a/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp b/llvm/lib/CodeGen/GlobalISel/CallLowering.cpp
index 7345fb2..d061664 100644 (file)
@@ -86,7 +86,6 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
   CallLoweringInfo Info;
   const DataLayout &DL = MIRBuilder.getDataLayout();
   MachineFunction &MF = MIRBuilder.getMF();
-  MachineRegisterInfo &MRI = MF.getRegInfo();
   bool CanBeTailCalled = CB.isTailCall() &&
                          isInTailCallPosition(CB, MF.getTarget()) &&
                          (MF.getFunction()
@@ -110,7 +109,6 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
     CanBeTailCalled = false;
   }
 
-
   // First step is to marshall all the function's parameters into the correct
   // physregs and memory locations. Gather the sequence of argument types that
   // we'll pass to the assigner function.
@@ -138,23 +136,10 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
   else
     Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);
 
-  Register ReturnHintAlignReg;
-  Align ReturnHintAlign;
-
   Info.OrigRet = ArgInfo{ResRegs, RetTy, 0, ISD::ArgFlagsTy{}};
-
-  if (!Info.OrigRet.Ty->isVoidTy()) {
+  if (!Info.OrigRet.Ty->isVoidTy())
     setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);
 
-    if (MaybeAlign Alignment = CB.getRetAlign()) {
-      if (*Alignment > Align(1)) {
-        ReturnHintAlignReg = MRI.cloneVirtualRegister(ResRegs[0]);
-        ReturnHintAlign = *Alignment;
-        std::swap(Info.OrigRet.Regs[0], ReturnHintAlignReg);
-      }
-    }
-  }
-
   Info.CB = &CB;
   Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
   Info.CallConv = CallConv;
@@ -162,15 +147,7 @@ bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
   Info.IsMustTailCall = CB.isMustTailCall();
   Info.IsTailCall = CanBeTailCalled;
   Info.IsVarArg = IsVarArg;
-  if (!lowerCall(MIRBuilder, Info))
-    return false;
-
-  if (ReturnHintAlignReg) {
-    MIRBuilder.buildAssertAlign(ReturnHintAlignReg, Info.OrigRet.Regs[0],
-                                ReturnHintAlign);
-  }
-
-  return true;
+  return lowerCall(MIRBuilder, Info);
 }
 
 template <typename FuncInfoTy>
diff --git a/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp b/llvm/lib/CodeGen/GlobalISel/GISelKnownBits.cpp
index 64c2f0d..306af80 100644 (file)
@@ -37,11 +37,6 @@ Align GISelKnownBits::computeKnownAlignment(Register R, unsigned Depth) {
   switch (MI->getOpcode()) {
   case TargetOpcode::COPY:
     return computeKnownAlignment(MI->getOperand(1).getReg(), Depth);
-  case TargetOpcode::G_ASSERT_ALIGN: {
-    // TODO: Min with source
-    int64_t LogAlign = MI->getOperand(2).getImm();
-    return Align(1ull << LogAlign);
-  }
   case TargetOpcode::G_FRAME_INDEX: {
     int FrameIdx = MI->getOperand(1).getIndex();
     return MF.getFrameInfo().getObjectAlign(FrameIdx);
@@ -471,18 +466,6 @@ void GISelKnownBits::computeKnownBitsImpl(Register R, KnownBits &Known,
       Known.Zero.setBitsFrom(SrcBitWidth);
     break;
   }
-  case TargetOpcode::G_ASSERT_ALIGN: {
-    int64_t LogOfAlign = MI.getOperand(2).getImm();
-    if (LogOfAlign == 0)
-      break;
-
-    // TODO: Should use maximum with source
-    // If a node is guaranteed to be aligned, set low zero bits accordingly as
-    // well as clearing one bits.
-    Known.Zero.setLowBits(LogOfAlign);
-    Known.One.clearLowBits(LogOfAlign);
-    break;
-  }
   case TargetOpcode::G_MERGE_VALUES: {
     unsigned NumOps = MI.getNumOperands();
     unsigned OpSize = MRI.getType(MI.getOperand(1).getReg()).getSizeInBits();
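The deleted handling above (and the surviving ISD::AssertAlign case in SelectionDAG further down) encodes one arithmetic fact: a value aligned to 1 << LogOfAlign has its LogOfAlign low bits known to be zero, which is what Known.Zero.setLowBits(LogOfAlign) and Known.One.clearLowBits(LogOfAlign) record. A self-contained sketch of that reasoning, not tied to the LLVM KnownBits API:

  #include <cstdint>
  #include <cstdio>

  int main() {
    unsigned LogOfAlign = 3;                               // alignment of 8 bytes
    uint64_t KnownZeroLowBits = (1ull << LogOfAlign) - 1;  // mask of bits forced to 0
    uint64_t Ptr = 0x1008;                                 // any 8-byte-aligned address
    std::printf("mask = 0x%llx, ptr & mask = %llu\n",
                (unsigned long long)KnownZeroLowBits,
                (unsigned long long)(Ptr & KnownZeroLowBits)); // always 0
  }
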
diff --git a/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/llvm/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index c672056..3912518 100644 (file)
@@ -282,6 +282,18 @@ MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
   return buildInstr(TargetOpcode::COPY, Res, Op);
 }
 
+MachineInstrBuilder MachineIRBuilder::buildAssertSExt(const DstOp &Res,
+                                                      const SrcOp &Op,
+                                                      unsigned Size) {
+  return buildInstr(TargetOpcode::G_ASSERT_SEXT, Res, Op).addImm(Size);
+}
+
+MachineInstrBuilder MachineIRBuilder::buildAssertZExt(const DstOp &Res,
+                                                      const SrcOp &Op,
+                                                      unsigned Size) {
+  return buildInstr(TargetOpcode::G_ASSERT_ZEXT, Res, Op).addImm(Size);
+}
+
 MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                     const ConstantInt &Val) {
   LLT Ty = Res.getLLTTy(*getMRI());
diff --git a/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp b/llvm/lib/CodeGen/GlobalISel/RegBankSelect.cpp
index 01af6bb..8d2677e 100644 (file)
@@ -626,8 +626,7 @@ bool RegBankSelect::assignInstr(MachineInstr &MI) {
   unsigned Opc = MI.getOpcode();
   if (isPreISelGenericOptimizationHint(Opc)) {
     assert((Opc == TargetOpcode::G_ASSERT_ZEXT ||
-            Opc == TargetOpcode::G_ASSERT_SEXT ||
-            Opc == TargetOpcode::G_ASSERT_ALIGN) &&
+            Opc == TargetOpcode::G_ASSERT_SEXT) &&
            "Unexpected hint opcode!");
     // The only correct mapping for these is to always use the source register
     // bank.
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 5168869..2d1dbcb 100644 (file)
@@ -3363,8 +3363,6 @@ KnownBits SelectionDAG::computeKnownBits(SDValue Op, const APInt &DemandedElts,
   case ISD::AssertAlign: {
     unsigned LogOfAlign = Log2(cast<AssertAlignSDNode>(Op)->getAlign());
     assert(LogOfAlign != 0);
-
-    // TODO: Should use maximum with source
     // If a node is guaranteed to be aligned, set low zero bits accordingly as
     // well as clearing one bits.
     Known.Zero.setLowBits(LogOfAlign);
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-assert-align.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-assert-align.ll
deleted file mode 100644 (file)
index 286a0a6..0000000
+++ /dev/null
@@ -1,169 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -march=amdgcn -mcpu=fiji -O0 -stop-after=irtranslator -global-isel -verify-machineinstrs -o - %s | FileCheck %s
-
-; TODO: Could potentially insert it here
-define void @arg_align_8(i8 addrspace(1)* align 8 %arg0) {
-  ; CHECK-LABEL: name: arg_align_8
-  ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
-  ; CHECK-NEXT:   G_STORE [[C]](s8), [[MV]](p1) :: (store (s8) into %ir.arg0, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY2]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY3]]
-  store i8 0, i8 addrspace(1)* %arg0, align 8
-  ret void
-}
-
-declare i8 addrspace(1)* @returns_ptr()
-declare align 8 i8 addrspace(1)* @returns_ptr_align8()
-
-define void @call_result_align_1() {
-  ; CHECK-LABEL: name: call_result_align_1
-  ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
-  ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @returns_ptr
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
-  ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @returns_ptr, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31, implicit-def $vgpr0, implicit-def $vgpr1
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY18]](s32), [[COPY19]](s32)
-  ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   G_STORE [[C]](s8), [[MV]](p1) :: (store (s8) into %ir.ptr, addrspace 1)
-  ; CHECK-NEXT:   [[COPY20:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY20]]
-  %ptr = call align 1 i8 addrspace(1)* @returns_ptr()
-  store i8 0, i8 addrspace(1)* %ptr, align 1
-  ret void
-}
-
-define void @call_result_align_8() {
-  ; CHECK-LABEL: name: call_result_align_8
-  ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
-  ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @returns_ptr
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
-  ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @returns_ptr, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31, implicit-def $vgpr0, implicit-def $vgpr1
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY18]](s32), [[COPY19]](s32)
-  ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[ASSERT_ALIGN:%[0-9]+]]:_(p1) = G_ASSERT_ALIGN [[MV]], 8
-  ; CHECK-NEXT:   G_STORE [[C]](s8), [[ASSERT_ALIGN]](p1) :: (store (s8) into %ir.ptr, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY20:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY20]]
-  %ptr = call align 8 i8 addrspace(1)* @returns_ptr()
-  store i8 0, i8 addrspace(1)* %ptr, align 8
-  ret void
-}
-
-define void @declaration_result_align_8() {
-  ; CHECK-LABEL: name: declaration_result_align_8
-  ; CHECK: bb.1 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $sgpr12, $sgpr13, $sgpr14, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9, $sgpr10_sgpr11, $sgpr30_sgpr31
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:vgpr_32(s32) = COPY $vgpr31
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:sgpr_32 = COPY $sgpr14
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:sgpr_32 = COPY $sgpr13
-  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:sgpr_32 = COPY $sgpr12
-  ; CHECK-NEXT:   [[COPY4:%[0-9]+]]:sgpr_64 = COPY $sgpr10_sgpr11
-  ; CHECK-NEXT:   [[COPY5:%[0-9]+]]:sgpr_64 = COPY $sgpr8_sgpr9
-  ; CHECK-NEXT:   [[COPY6:%[0-9]+]]:sgpr_64 = COPY $sgpr6_sgpr7
-  ; CHECK-NEXT:   [[COPY7:%[0-9]+]]:sgpr_64 = COPY $sgpr4_sgpr5
-  ; CHECK-NEXT:   [[COPY8:%[0-9]+]]:sgpr_64 = COPY $sgpr30_sgpr31
-  ; CHECK-NEXT:   [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
-  ; CHECK-NEXT:   ADJCALLSTACKUP 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[GV:%[0-9]+]]:_(p0) = G_GLOBAL_VALUE @returns_ptr_align8
-  ; CHECK-NEXT:   [[COPY9:%[0-9]+]]:_(p4) = COPY [[COPY7]]
-  ; CHECK-NEXT:   [[COPY10:%[0-9]+]]:_(p4) = COPY [[COPY6]]
-  ; CHECK-NEXT:   [[COPY11:%[0-9]+]]:_(p4) = COPY [[COPY5]]
-  ; CHECK-NEXT:   [[COPY12:%[0-9]+]]:_(s64) = COPY [[COPY4]]
-  ; CHECK-NEXT:   [[COPY13:%[0-9]+]]:_(s32) = COPY [[COPY3]]
-  ; CHECK-NEXT:   [[COPY14:%[0-9]+]]:_(s32) = COPY [[COPY2]]
-  ; CHECK-NEXT:   [[COPY15:%[0-9]+]]:_(s32) = COPY [[COPY1]]
-  ; CHECK-NEXT:   [[COPY16:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
-  ; CHECK-NEXT:   [[COPY17:%[0-9]+]]:_(<4 x s32>) = COPY $sgpr0_sgpr1_sgpr2_sgpr3
-  ; CHECK-NEXT:   $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY17]](<4 x s32>)
-  ; CHECK-NEXT:   $sgpr4_sgpr5 = COPY [[COPY9]](p4)
-  ; CHECK-NEXT:   $sgpr6_sgpr7 = COPY [[COPY10]](p4)
-  ; CHECK-NEXT:   $sgpr8_sgpr9 = COPY [[COPY11]](p4)
-  ; CHECK-NEXT:   $sgpr10_sgpr11 = COPY [[COPY12]](s64)
-  ; CHECK-NEXT:   $sgpr12 = COPY [[COPY13]](s32)
-  ; CHECK-NEXT:   $sgpr13 = COPY [[COPY14]](s32)
-  ; CHECK-NEXT:   $sgpr14 = COPY [[COPY15]](s32)
-  ; CHECK-NEXT:   $vgpr31 = COPY [[COPY16]](s32)
-  ; CHECK-NEXT:   $sgpr30_sgpr31 = G_SI_CALL [[GV]](p0), @returns_ptr_align8, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4_sgpr5, implicit $sgpr6_sgpr7, implicit $sgpr8_sgpr9, implicit $sgpr10_sgpr11, implicit $sgpr12, implicit $sgpr13, implicit $sgpr14, implicit $vgpr31, implicit-def $vgpr0, implicit-def $vgpr1
-  ; CHECK-NEXT:   [[COPY18:%[0-9]+]]:_(s32) = COPY $vgpr0
-  ; CHECK-NEXT:   [[COPY19:%[0-9]+]]:_(s32) = COPY $vgpr1
-  ; CHECK-NEXT:   [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY18]](s32), [[COPY19]](s32)
-  ; CHECK-NEXT:   ADJCALLSTACKDOWN 0, 0, implicit-def $scc
-  ; CHECK-NEXT:   [[ASSERT_ALIGN:%[0-9]+]]:_(p1) = G_ASSERT_ALIGN [[MV]], 8
-  ; CHECK-NEXT:   G_STORE [[C]](s8), [[ASSERT_ALIGN]](p1) :: (store (s8) into %ir.ptr, align 8, addrspace 1)
-  ; CHECK-NEXT:   [[COPY20:%[0-9]+]]:ccr_sgpr_64 = COPY [[COPY8]]
-  ; CHECK-NEXT:   S_SETPC_B64_return [[COPY20]]
-  %ptr = call i8 addrspace(1)* @returns_ptr_align8()
-  store i8 0, i8 addrspace(1)* %ptr, align 8
-  ret void
-}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-assert-align.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect-assert-align.mir
deleted file mode 100644 (file)
index 783a1e9..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -march=amdgcn -mcpu=gfx90a -run-pass=regbankselect %s -verify-machineinstrs -o - | FileCheck %s
-
----
-name:            assert_align_vgpr
-alignment:       4
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $vgpr0_vgpr1
-
-    ; CHECK-LABEL: name: assert_align_vgpr
-    ; CHECK: liveins: $vgpr0_vgpr1
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: %copy:vgpr(p1) = COPY $vgpr0_vgpr1
-    ; CHECK-NEXT: %assert_align:vgpr(p1) = G_ASSERT_ALIGN %copy, 4
-    ; CHECK-NEXT: S_ENDPGM 0, implicit %assert_align(p1)
-    %copy:_(p1) = COPY $vgpr0_vgpr1
-    %assert_align:_(p1) = G_ASSERT_ALIGN %copy, 4
-    S_ENDPGM 0, implicit %assert_align
-...
-
----
-name:            assert_align_sgpr
-alignment:       4
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $sgpr8_sgpr9
-
-    ; CHECK-LABEL: name: assert_align_sgpr
-    ; CHECK: liveins: $sgpr8_sgpr9
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: %copy:sgpr(p1) = COPY $sgpr8_sgpr9
-    ; CHECK-NEXT: %assert_align:sgpr(p1) = G_ASSERT_ALIGN %copy, 4
-    ; CHECK-NEXT: S_ENDPGM 0, implicit %assert_align(p1)
-    %copy:_(p1) = COPY $sgpr8_sgpr9
-    %assert_align:_(p1) = G_ASSERT_ALIGN %copy, 4
-    S_ENDPGM 0, implicit %assert_align
-...
-
----
-name:            assert_align_agpr
-alignment:       4
-legalized:       true
-tracksRegLiveness: true
-body:             |
-  bb.0:
-    liveins: $agpr0_agpr1
-
-    ; CHECK-LABEL: name: assert_align_agpr
-    ; CHECK: liveins: $agpr0_agpr1
-    ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: %copy:agpr(p1) = COPY $agpr0_agpr1
-    ; CHECK-NEXT: %assert_align:agpr(p1) = G_ASSERT_ALIGN %copy, 4
-    ; CHECK-NEXT: S_ENDPGM 0, implicit %assert_align(p1)
-    %copy:_(p1) = COPY $agpr0_agpr1
-    %assert_align:_(p1) = G_ASSERT_ALIGN %copy, 4
-    S_ENDPGM 0, implicit %assert_align
-...
diff --git a/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp b/llvm/unittests/CodeGen/GlobalISel/KnownBitsTest.cpp
index 7a2fc0f..f5594d0 100644 (file)
@@ -1917,58 +1917,3 @@ TEST_F(AMDGPUGISelMITest, TestNumSignBitsSBFX) {
   EXPECT_EQ(1u, Info.computeNumSignBits(CopyUnkValBfxReg));
   EXPECT_EQ(1u, Info.computeNumSignBits(CopyUnkOffBfxReg));
 }
-
-TEST_F(AMDGPUGISelMITest, TestKnownBitsAssertAlign) {
-  StringRef MIRString = R"MIR(
-   %val:_(s64) = COPY $vgpr0_vgpr1
-   %ptrval:_(p1) = COPY $vgpr0_vgpr1
-
-   %assert_align0:_(s64) = G_ASSERT_ALIGN %val, 0
-   %copy_assert_align0:_(s64) = COPY %assert_align0
-
-   %assert_align1:_(s64) = G_ASSERT_ALIGN %val, 1
-   %copy_assert_align1:_(s64) = COPY %assert_align1
-
-   %assert_align2:_(s64) = G_ASSERT_ALIGN %val, 2
-   %copy_assert_align2:_(s64) = COPY %assert_align2
-
-   %assert_align3:_(s64) = G_ASSERT_ALIGN %val, 3
-   %copy_assert_align3:_(s64) = COPY %assert_align3
-
-   %assert_align8:_(s64) = G_ASSERT_ALIGN %val, 8
-   %copy_assert_align8:_(s64) = COPY %assert_align8
-
-   %assert_maxalign:_(s64) = G_ASSERT_ALIGN %val, 30
-   %copy_assert_maxalign:_(s64) = COPY %assert_maxalign
-
-   %assert_ptr_align5:_(p1) = G_ASSERT_ALIGN %ptrval, 5
-   %copy_assert_ptr_align5:_(p1) = COPY %assert_ptr_align5
-)MIR";
-  setUp(MIRString);
-  if (!TM)
-    return;
-  GISelKnownBits Info(*MF);
-
-  KnownBits Res;
-  auto GetKB = [&](unsigned Idx) {
-    Register CopyReg = Copies[Idx];
-    auto *Copy = MRI->getVRegDef(CopyReg);
-    return Info.getKnownBits(Copy->getOperand(1).getReg());
-  };
-
-  auto CheckBits = [&](unsigned NumBits, unsigned Idx) {
-    Res = GetKB(Idx);
-    EXPECT_EQ(64u, Res.getBitWidth());
-    EXPECT_EQ(NumBits, Res.Zero.countTrailingOnes());
-    EXPECT_EQ(64u, Res.One.countTrailingZeros());
-    EXPECT_EQ(Align(1ull << NumBits), Info.computeKnownAlignment(Copies[Idx]));
-  };
-
-  CheckBits(0, Copies.size() - 7);
-  CheckBits(1, Copies.size() - 6);
-  CheckBits(2, Copies.size() - 5);
-  CheckBits(3, Copies.size() - 4);
-  CheckBits(8, Copies.size() - 3);
-  CheckBits(30, Copies.size() - 2);
-  CheckBits(5, Copies.size() - 1);
-}