namespace mlir {
-class ModuleOp;
+class Operation;
/// A simple object cache following Lang's LLJITWithObjectCache example.
class SimpleObjectCache : public llvm::ObjectCache {
};
struct ExecutionEngineOptions {
- /// If `llvmModuleBuilder` is provided, it will be used to create LLVM module
- /// from the given MLIR module. Otherwise, a default `translateModuleToLLVMIR`
- /// function will be used to translate MLIR module to LLVM IR.
- llvm::function_ref<std::unique_ptr<llvm::Module>(ModuleOp,
+ /// If `llvmModuleBuilder` is provided, it will be used to create an LLVM
+ /// module from the given MLIR IR. Otherwise, a default
+ /// `translateModuleToLLVMIR` function will be used to translate to LLVM IR.
+ llvm::function_ref<std::unique_ptr<llvm::Module>(Operation *,
llvm::LLVMContext &)>
llvmModuleBuilder = nullptr;
bool enablePerfNotificationListener = true;
};
-/// JIT-backed execution engine for MLIR modules. Assumes the module can be
-/// converted to LLVM IR. For each function, creates a wrapper function with
-/// the fixed interface
+/// JIT-backed execution engine for MLIR. Assumes the IR can be converted to
+/// LLVM IR. For each function, creates a wrapper function with the fixed
+/// interface
///
/// void _mlir_funcName(void **)
///
ExecutionEngine(bool enableObjectCache, bool enableGDBNotificationListener,
bool enablePerfNotificationListener);
- /// Creates an execution engine for the given module.
+ /// Creates an execution engine for the given MLIR IR.
static llvm::Expected<std::unique_ptr<ExecutionEngine>>
- create(ModuleOp m, const ExecutionEngineOptions &options = {});
+ create(Operation *op, const ExecutionEngineOptions &options = {});
/// Looks up a packed-argument function wrapping the function with the given
/// name and returns a pointer to it. Propagates errors in case of failure.
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/FileUtilities.h"
+#include "mlir/Tools/ParseUtilties.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
llvm::cl::opt<bool> hostSupportsJit{"host-supports-jit",
llvm::cl::desc("Report host JIT support"),
llvm::cl::Hidden};
+
+ llvm::cl::opt<bool> noImplicitModule{
+ "no-implicit-module",
+ llvm::cl::desc(
+ "Disable implicit addition of a top-level module op during parsing"),
+ llvm::cl::init(false)};
};
struct CompileAndExecuteConfig {
/// A custom function that is passed to ExecutionEngine. It processes MLIR
/// module and creates LLVM IR module.
- llvm::function_ref<std::unique_ptr<llvm::Module>(ModuleOp,
+ llvm::function_ref<std::unique_ptr<llvm::Module>(Operation *,
llvm::LLVMContext &)>
llvmModuleBuilder;
} // namespace
-static OwningOpRef<ModuleOp> parseMLIRInput(StringRef inputFilename,
- MLIRContext *context) {
+static OwningOpRef<Operation *> parseMLIRInput(StringRef inputFilename,
+ bool insertImplicitModule,
+ MLIRContext *context) {
// Set up the input file.
std::string errorMessage;
auto file = openInputFile(inputFilename, &errorMessage);
llvm::SourceMgr sourceMgr;
sourceMgr.AddNewSourceBuffer(std::move(file), SMLoc());
- return parseSourceFile<ModuleOp>(sourceMgr, context);
+ OwningOpRef<Operation *> module =
+ parseSourceFileForTool(sourceMgr, context, insertImplicitModule);
+ if (!module)
+ return nullptr;
+ if (!module.get()->hasTrait<OpTrait::SymbolTable>()) {
+ llvm::errs() << "Error: top-level op must be a symbol table.\n";
+ return nullptr;
+ }
+ return module;
}
static inline Error makeStringError(const Twine &message) {
}
// JIT-compile the given module and run "entryPoint" with "args" as arguments.
-static Error compileAndExecute(Options &options, ModuleOp module,
+static Error compileAndExecute(Options &options, Operation *module,
StringRef entryPoint,
CompileAndExecuteConfig config, void **args) {
Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel;
return Error::success();
}
-static Error compileAndExecuteVoidFunction(Options &options, ModuleOp module,
+static Error compileAndExecuteVoidFunction(Options &options, Operation *module,
StringRef entryPoint,
CompileAndExecuteConfig config) {
- auto mainFunction = module.lookupSymbol<LLVM::LLVMFuncOp>(entryPoint);
+ auto mainFunction = dyn_cast_or_null<LLVM::LLVMFuncOp>(
+ SymbolTable::lookupSymbolIn(module, entryPoint));
if (!mainFunction || mainFunction.empty())
return makeStringError("entry point not found");
void *empty = nullptr;
return Error::success();
}
template <typename Type>
-Error compileAndExecuteSingleReturnFunction(Options &options, ModuleOp module,
+Error compileAndExecuteSingleReturnFunction(Options &options, Operation *module,
StringRef entryPoint,
CompileAndExecuteConfig config) {
- auto mainFunction = module.lookupSymbol<LLVM::LLVMFuncOp>(entryPoint);
+ auto mainFunction = dyn_cast_or_null<LLVM::LLVMFuncOp>(
+ SymbolTable::lookupSymbolIn(module, entryPoint));
if (!mainFunction || mainFunction.isExternal())
return makeStringError("entry point not found");
MLIRContext context(registry);
- auto m = parseMLIRInput(options.inputFilename, &context);
+ auto m = parseMLIRInput(options.inputFilename, !options.noImplicitModule,
+ &context);
if (!m) {
llvm::errs() << "could not parse the input IR\n";
return 1;
// Get the function used to compile and execute the module.
using CompileAndExecuteFnT =
- Error (*)(Options &, ModuleOp, StringRef, CompileAndExecuteConfig);
+ Error (*)(Options &, Operation *, StringRef, CompileAndExecuteConfig);
auto compileAndExecuteFn =
StringSwitch<CompileAndExecuteFnT>(options.mainFuncType.getValue())
.Case("i32", compileAndExecuteSingleReturnFunction<int32_t>)
/// Each of these two modules is translated to LLVM IR module, then they are
/// linked together and returned.
static std::unique_ptr<llvm::Module>
-convertMLIRModule(ModuleOp module, llvm::LLVMContext &context) {
+convertMLIRModule(Operation *op, llvm::LLVMContext &context) {
+ auto module = dyn_cast<ModuleOp>(op);
+ if (!module)
+  return op->emitError("op must be a 'builtin.module'"), nullptr;
// Verify that there is only one nested module.
auto modules = module.getOps<ModuleOp>();
if (!llvm::hasSingleElement(modules)) {
return mainModule;
}
-static LogicalResult runMLIRPasses(ModuleOp module) {
- PassManager passManager(module.getContext());
+static LogicalResult runMLIRPasses(Operation *module) {
+ PassManager passManager(module->getContext(),
+ module->getName().getStringRef());
applyPassManagerCLOptions(passManager);
passManager.addPass(createGpuKernelOutliningPass());
passManager.addPass(createConvertGPUToSPIRVPass(/*mapMemorySpace=*/true));