void initializeAMDGPULowerKernelAttributesPass(PassRegistry &);
extern char &AMDGPULowerKernelAttributesID;
+// New-pass-manager wrapper for the kernel-attribute lowering. Runs per
+// function (unlike the legacy ModulePass below, which walks the whole
+// module) and shares the same processUse() lowering logic.
+struct AMDGPULowerKernelAttributesPass
+ : PassInfoMixin<AMDGPULowerKernelAttributesPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
+};
+
void initializeAMDGPUPropagateAttributesEarlyPass(PassRegistry &);
extern char &AMDGPUPropagateAttributesEarlyID;
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
};
class AMDGPULowerKernelAttributes : public ModulePass {
- Module *Mod = nullptr;
-
public:
static char ID;
AMDGPULowerKernelAttributes() : ModulePass(ID) {}
- bool processUse(CallInst *CI);
-
- bool doInitialization(Module &M) override;
bool runOnModule(Module &M) override;
StringRef getPassName() const override {
} // end anonymous namespace
-bool AMDGPULowerKernelAttributes::doInitialization(Module &M) {
- Mod = &M;
- return false;
-}
-
-bool AMDGPULowerKernelAttributes::processUse(CallInst *CI) {
+static bool processUse(CallInst *CI) {
Function *F = CI->getParent()->getParent();
auto MD = F->getMetadata("reqd_work_group_size");
Value *GridSizeY = nullptr;
Value *GridSizeZ = nullptr;
- const DataLayout &DL = Mod->getDataLayout();
+ const DataLayout &DL = F->getParent()->getDataLayout();
// We expect to see several GEP users, casted to the appropriate type and
// loaded.
StringRef DispatchPtrName
= Intrinsic::getName(Intrinsic::amdgcn_dispatch_ptr);
- Function *DispatchPtr = Mod->getFunction(DispatchPtrName);
+ Function *DispatchPtr = M.getFunction(DispatchPtrName);
if (!DispatchPtr) // Dispatch ptr not used.
return false;
+// Legacy-PM factory; still needed while the legacy pipeline is supported.
ModulePass *llvm::createAMDGPULowerKernelAttributesPass() {
  return new AMDGPULowerKernelAttributes();
}
+
+// New-PM entry point: if the module ever calls the amdgcn.dispatch.ptr
+// intrinsic, scan F for those calls and hand each one to processUse() to
+// fold dispatch-packet queries using the kernel's attributes/metadata.
+PreservedAnalyses
+AMDGPULowerKernelAttributesPass::run(Function &F, FunctionAnalysisManager &AM) {
+ StringRef DispatchPtrName =
+ Intrinsic::getName(Intrinsic::amdgcn_dispatch_ptr);
+
+ // Cheap early-out: the intrinsic declaration only exists if something in
+ // the module uses it.
+ Function *DispatchPtr = F.getParent()->getFunction(DispatchPtrName);
+ if (!DispatchPtr) // Dispatch ptr not used.
+ return PreservedAnalyses::all();
+
+ for (Instruction &I : instructions(F)) {
+ if (CallInst *CI = dyn_cast<CallInst>(&I)) {
+ if (CI->getCalledFunction() == DispatchPtr)
+ processUse(CI);
+ }
+ }
+
+ // NOTE(review): processUse() can rewrite IR, yet its changed/unchanged
+ // result is discarded and PreservedAnalyses::all() is returned
+ // unconditionally — confirm analyses cannot go stale here.
+ return PreservedAnalyses::all();
+}
PM.addPass(AMDGPUPromoteAllocaToVectorPass(*this));
return true;
}
+ if (PassName == "amdgpu-lower-kernel-attributes") {
+ PM.addPass(AMDGPULowerKernelAttributesPass());
+ return true;
+ }
return false;
});
// but before SROA to increase SROA opportunities.
FPM.addPass(InferAddressSpacesPass());
+ // This should run after inlining to have any chance of doing
+ // anything, and before other cleanup optimizations.
+ FPM.addPass(AMDGPULowerKernelAttributesPass());
+
if (Level != PassBuilder::OptimizationLevel::O0) {
// Promote alloca to vector before SROA and loop unroll. If we
// manage to eliminate allocas before unroll we may choose to unroll
static bool shouldPinPassToLegacyPM(StringRef Pass) {
std::vector<StringRef> PassNameExactToIgnore = {
"amdgpu-simplifylib", "amdgpu-usenative", "amdgpu-promote-alloca",
- "amdgpu-promote-alloca-to-vector"};
+ "amdgpu-promote-alloca-to-vector", "amdgpu-lower-kernel-attributes"};
for (const auto &P : PassNameExactToIgnore)
if (Pass == P)
return false;