return true;
}
+
+// Return true on success: populate BaseReg with the base-register operand of
+// LdSt, Offset with the byte offset from that base, and Width with the size
+// in bytes of the memory being loaded/stored.
+bool RISCVInstrInfo::getMemOperandWithOffsetWidth(
+    const MachineInstr &LdSt, const MachineOperand *&BaseReg, int64_t &Offset,
+    unsigned &Width, const TargetRegisterInfo *TRI) const {
+  assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
+
+  // Here we assume the standard RISC-V ISA, which uses a base+offset
+  // addressing mode. You'll need to relax these conditions to support custom
+  // load/stores instructions.
+  if (LdSt.getNumExplicitOperands() != 3)
+    return false;
+  if (!LdSt.getOperand(1).isReg() || !LdSt.getOperand(2).isImm())
+    return false;
+
+  // Require exactly one memory operand so the access width is unambiguous.
+  if (!LdSt.hasOneMemOperand())
+    return false;
+
+  Width = (*LdSt.memoperands_begin())->getSize();
+  BaseReg = &LdSt.getOperand(1);
+  Offset = LdSt.getOperand(2).getImm();
+  return true;
+}
+
+// Return true if MIa and MIb provably access disjoint memory: both decompose
+// to base+offset form, share an identical base operand, and their
+// [offset, offset+width) byte ranges do not overlap. Conservatively returns
+// false whenever disjointness cannot be proven.
+bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
+    const MachineInstr &MIa, const MachineInstr &MIb) const {
+  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
+  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");
+
+  // Ordered accesses (volatile, atomic) or unmodeled side effects must keep
+  // their relative order, so never report them as disjoint.
+  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
+      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
+    return false;
+
+  // Retrieve the base register, offset from the base register and width. Width
+  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4). If
+  // base registers are identical, and the offset of a lower memory access +
+  // the width doesn't overlap the offset of a higher memory access,
+  // then the memory accesses are different.
+  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
+  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
+  int64_t OffsetA = 0, OffsetB = 0;
+  unsigned int WidthA = 0, WidthB = 0;
+  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
+      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
+    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
+      // Use int64_t throughout: the offsets are int64_t, so narrowing the
+      // min/max results to int could truncate and the overlap sum could wrap
+      // in 32 bits.
+      int64_t LowOffset = std::min(OffsetA, OffsetB);
+      int64_t HighOffset = std::max(OffsetA, OffsetB);
+      int64_t LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
+      if (LowOffset + LowWidth <= HighOffset)
+        return true;
+    }
+  }
+  return false;
+}
bool verifyInstruction(const MachineInstr &MI,
StringRef &ErrInfo) const override;
+ bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
+ const MachineOperand *&BaseOp,
+ int64_t &Offset, unsigned &Width,
+ const TargetRegisterInfo *TRI) const;
+
+ bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
+ const MachineInstr &MIb) const override;
+
protected:
const RISCVSubtarget &STI;
};
--- /dev/null
+; REQUIRES: asserts
+; RUN: llc -mtriple=riscv32 -debug-only=machine-scheduler < %s \
+; RUN: -o /dev/null 2>&1 | FileCheck %s
+; RUN: llc -mtriple=riscv64 -debug-only=machine-scheduler < %s \
+; RUN: -o /dev/null 2>&1 | FileCheck %s
+
+; This test exercises the areMemAccessesTriviallyDisjoint hook.
+; Test that the two stores are disjoint memory accesses. If the corresponding
+; store machine instructions don't depend on each other, the second store should
+; not appear in the successors list of the first one and the first one should
+; not appear on the predecessors list of the second one.
+define i32 @test_disjoint(i32* %P, i32 %v) {
+entry:
+; CHECK: ********** MI Scheduling **********
+; CHECK-LABEL: test_disjoint:%bb.0
+; CHECK:SU(2): SW %1:gpr, %0:gpr, 12 :: (store 4 into %ir.arrayidx)
+; CHECK-NOT: Successors:
+; CHECK:SU(3): SW %1:gpr, %0:gpr, 8 :: (store 4 into %ir.arrayidx1)
+; CHECK: Predecessors:
+; CHECK-NOT: SU(2): Ord Latency=0 Memory
+; The stores target elements 3 and 2 of %P: 4-byte accesses at byte offsets
+; 12 and 8 off the same base register, so their ranges cannot overlap.
+  %arrayidx = getelementptr inbounds i32, i32* %P, i32 3
+  store i32 %v, i32* %arrayidx
+  %arrayidx1 = getelementptr inbounds i32, i32* %P, i32 2
+  store i32 %v, i32* %arrayidx1
+  ret i32 %v
+}