return false;
}
+
+MachineInstr *Thumb1InstrInfo::foldMemoryOperandImpl(
+    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+    MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
+    LiveIntervals *LIS) const {
+  // Replace:
+  //   ldr Rd, <constpool slot holding a function address>
+  //   blx Rd
+  // with:
+  //   bl func
+  //
+  // Folding is purely an optimization: returning nullptr at any point
+  // simply declines the fold and leaves the ldr+blx pair in place.
+  if (MI.getOpcode() == ARM::tBLXr && LoadMI.getOpcode() == ARM::tLDRpci &&
+      MI.getParent() == LoadMI.getParent()) {
+    unsigned CPI = LoadMI.getOperand(1).getIndex();
+    const MachineConstantPool *MCP = MF.getConstantPool();
+    if (CPI >= MCP->getConstants().size())
+      return nullptr;
+    const MachineConstantPoolEntry &CPE = MCP->getConstants()[CPI];
+    // Bail out on target-specific (machine) constpool entries rather than
+    // asserting: reading CPE.Val.ConstVal for such an entry would access
+    // the wrong union member, which is undefined behavior in a build where
+    // the assert compiles away.
+    if (CPE.isMachineConstantPoolEntry())
+      return nullptr;
+    // Only a direct call to a known Function can become a bl; any other
+    // constant (e.g. an inttoptr'd address) stays as an indirect blx.
+    const Function *Callee = dyn_cast<Function>(CPE.Val.ConstVal);
+    if (!Callee)
+      return nullptr;
+    const char *FuncName = MF.createExternalSymbolName(Callee->getName());
+    MachineInstrBuilder MIB =
+        BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(ARM::tBL))
+            .add(predOps(ARMCC::AL))
+            .addExternalSymbol(FuncName);
+    // Carry over the call's implicit operands (argument/return registers,
+    // register-mask, etc.) so liveness information stays correct.
+    for (auto &MO : MI.implicit_operands())
+      MIB.add(MO);
+    return MIB.getInstr();
+  }
+
+  return nullptr;
+}
const TargetRegisterInfo *TRI) const override;
bool canCopyGluedNodeDuringSchedule(SDNode *N) const override;
+
+protected:
+  /// Fold a constant-pool load of a function address (tLDRpci) feeding an
+  /// indirect call (tBLXr) into a direct tBL. Overrides TargetInstrInfo;
+  /// the default argument mirrors the base declaration.
+  MachineInstr *foldMemoryOperandImpl(
+      MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
+      MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
+      LiveIntervals *LIS = nullptr) const override;
+
private:
void expandLoadStackGuard(MachineBasicBlock::iterator MI) const override;
};
--- /dev/null
+; RUN: llc < %s | FileCheck %s
+;
+; Test that the Thumb1 ldr-from-constpool + blx pair for a known callee is
+; folded into a direct "bl g" when optimizing for size.
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv6m-arm-none-eabi"
+
+; Four identical calls to @g: three are expected to fold to a direct bl,
+; while one remains an indirect blx through a register.
+; NOTE(review): the expectation that exactly the second call stays indirect
+; depends on where the folder/register allocator keeps the loaded address
+; live — confirm this ordering is stable.
+; CHECK-LABEL: f:
+; CHECK: bl g
+; CHECK: blx r
+; CHECK: bl g
+; CHECK: bl g
+define void @f(i32* %p, i32 %x, i32 %y, i32 %z) minsize optsize {
+entry:
+ call void @g(i32* %p, i32 %x, i32 %y, i32 %z)
+ call void @g(i32* %p, i32 %x, i32 %y, i32 %z)
+ call void @g(i32* %p, i32 %x, i32 %y, i32 %z)
+ call void @g(i32* %p, i32 %x, i32 %y, i32 %z)
+ ret void
+}
+
+declare void @g(i32*,i32,i32,i32)
--- /dev/null
+; RUN: llc < %s
+; Verify that we don't crash on indirect function calls
+; in Thumb1InstrInfo::foldMemoryOperand.
+; The call target is an inttoptr constant, so the constpool entry holds a
+; plain integer rather than a Function; the fold must decline gracefully
+; (dyn_cast<Function> returns null) instead of crashing. No FileCheck
+; patterns — a clean llc run is the pass condition.
+
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+target triple = "thumbv6m-arm-none-eabi"
+
+; Function Attrs: minsize nounwind optsize
+; NOTE(review): the "Function Attrs" comment above looks stale — the define
+; below carries no attributes; confirm whether minsize/optsize were intended.
+define void @test(i32* %p, i32 %x, i32 %y, i32 %z) {
+entry:
+ tail call void inttoptr (i32 19088743 to void (i32*, i32, i32, i32)*)(i32* %p, i32 %x, i32 %y, i32 %z)
+ tail call void inttoptr (i32 19088743 to void (i32*, i32, i32, i32)*)(i32* %p, i32 %x, i32 %y, i32 %z)
+ tail call void inttoptr (i32 19088743 to void (i32*, i32, i32, i32)*)(i32* %p, i32 %x, i32 %y, i32 %z)
+ tail call void inttoptr (i32 19088743 to void (i32*, i32, i32, i32)*)(i32* %p, i32 %x, i32 %y, i32 %z)
+ ret void
+}