bool isNoNanSrc(SDValue N) const;
bool isInlineImmediate(const SDNode *N) const;
bool isVGPRImm(const SDNode *N) const;
+ bool isUniformLoad(const SDNode *N) const;
bool isUniformBr(const SDNode *N) const;
MachineSDNode *buildSMovImm64(SDLoc &DL, uint64_t Val, EVT VT) const;
return !AllUsesAcceptSReg && (Limit < 10);
}
+// Returns true if load \p N can be selected as a scalar (uniform) load.
+// This is the single shared predicate used by the TableGen smrd_load
+// PatFrag; keeping the logic here avoids duplicating it in the .td file.
+bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode * N) const {
+  auto Ld = cast<LoadSDNode>(N);
+
+  // Scalar loads require at least dword (4-byte) alignment in all cases.
+  return Ld->getAlignment() >= 4 &&
+    (
+      (
+        (
+          // Constant address spaces are eligible directly...
+          Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
+          Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT
+        )
+        &&
+        // ...provided divergence analysis proved the value uniform.
+        !N->isDivergent()
+      )
+      ||
+      (
+        // Global-address loads qualify only when the subtarget opts in to
+        // scalarizing global accesses, the load is non-volatile and
+        // non-divergent, and the memory operand is known not to be
+        // clobbered between kernel argument setup and this use.
+        Subtarget->getScalarizeGlobalBehavior() &&
+        Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
+        !Ld->isVolatile() &&
+        !N->isDivergent() &&
+        static_cast<const SITargetLowering *>(
+          getTargetLowering())->isMemOpHasNoClobberedMemOperand(N)
+      )
+    );
+}
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
const AMDGPUTargetLowering& Lowering =
//===----------------------------------------------------------------------===//
-def smrd_load : PatFrag <(ops node:$ptr), (load node:$ptr), [{
- auto Ld = cast<LoadSDNode>(N);
- return Ld->getAlignment() >= 4 &&
- ((((Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) || (Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)) && !N->isDivergent()) ||
- (Subtarget->getScalarizeGlobalBehavior() && Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
- !Ld->isVolatile() && !N->isDivergent() &&
- static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpHasNoClobberedMemOperand(N)));
-}]>;
+// The uniform-load predicate now lives in a single place,
+// AMDGPUDAGToDAGISel::isUniformLoad, so the C++ selector and this PatFrag
+// cannot drift apart.
+def smrd_load : PatFrag <(ops node:$ptr), (load node:$ptr), [{ return isUniformLoad(N);}]>;
// i64 complex patterns producing 2 results each, resolved by the named
// C++ selector routines (SelectSMRDImm / SelectSMRDImm32).
def SMRDImm : ComplexPattern<i64, 2, "SelectSMRDImm">;
def SMRDImm32 : ComplexPattern<i64, 2, "SelectSMRDImm32">;